Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
365 changes: 365 additions & 0 deletions bin/k8s/values-development.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,365 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

texera:
# Container image registry and tag for all Texera services
# Override these to use a different registry or version
imageRegistry: ghcr.io/apache
imageTag: latest

global:
# Required by Bitnami sub-charts (postgresql, minio) to allow custom images
security:
allowInsecureImages: true

# Persistence Configuration
# This controls how Persistent Volumes (PVs) and Persistent Volume Claims (PVCs) are managed
#
# removeAfterUninstall:
# - true: PVCs will be deleted when helm uninstalls the chart
# - false: PVCs will remain after uninstall to preserve the data
persistence:
removeAfterUninstall: true
minioHostLocalPath: ""
postgresqlHostLocalPath: ""

# Part 1: configuration of PostgreSQL, MinIO, and LakeFS
postgresql:
image:
repository: groonga/pgroonga
tag: latest
debug: true
auth:
postgresPassword: root_password # superuser password used to run the init script (development only — override in production)
primary:
containerSecurityContext:
# Disabled because groonga/pgroonga needs to write a lock/socket file to /var/run/postgresql
readOnlyRootFilesystem: false
livenessProbe:
initialDelaySeconds: 30 # increase if PostgreSQL startup is slow on the cluster
readinessProbe:
initialDelaySeconds: 30 # increase if PostgreSQL startup is slow on the cluster
resources:
requests:
cpu: "0.25"
memory: "256Mi"
limits:
cpu: "1"
memory: "256Mi"
persistence:
enabled: true
size: 10Gi
storageClass: local-path
existingClaim: "postgresql-data-pvc"
initdb:
scriptsConfigMap: "postgresql-init-script"

minio:
mode: standalone
image:
repository: bitnamilegacy/minio
tag: 2025.3.12-debian-12-r0
resources:
requests:
memory: "256Mi"
limits:
memory: "256Mi"
gateway:
enabled: false
hostname: "" # the external URL for MinIO, e.g. "minio.example.com"
tlsSecretName: "" # e.g. "minio-tls-secret"
auth:
rootUser: texera_minio
rootPassword: password
service:
# In production, use ClusterIP to avoid exposing MinIO to the internet
# type: ClusterIP
type: NodePort
nodePorts:
api: 31000
persistence:
enabled: true
size: 20Gi
storageClass: local-path
existingClaim: "minio-data-pvc"

lakefs:
secrets:
authEncryptSecretKey: random_string_for_lakefs
databaseConnectionString: postgres://postgres:root_password@texera-postgresql:5432/texera_lakefs?sslmode=disable
auth:
username: texera-admin
accessKey: AKIAIOSFOLKFSSAMPLES
secretKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
service:
port: 8000
lakefsConfig: |
database:
type: postgres
blockstore:
type: s3
s3:
endpoint: http://texera-minio:9000
pre_signed_expiry: 15m
pre_signed_endpoint: http://localhost:31000
force_path_style: true
credentials:
access_key_id: texera_minio
secret_access_key: password

# Part 2: configuration of Texera-related microservices
texeraImages:
pullPolicy: Always

# Example data loader configuration
exampleDataLoader:
enabled: true
imageName: texera-example-data-loader
username: texera
password: texera
datasetDir: datasets
workflowDir: workflows

webserver:
name: webserver
numOfPods: 1 # Number of pods for the Texera deployment
imageName: texera-dashboard-service
service:
type: ClusterIP
port: 8080
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 1Gi

workflowComputingUnitManager:
name: workflow-computing-unit-manager
numOfPods: 1
serviceAccountName: workflow-computing-unit-manager-service-account
imageName: texera-workflow-computing-unit-managing-service
service:
type: ClusterIP
port: 8888
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 256Mi

workflowCompilingService:
name: workflow-compiling-service
numOfPods: 1
imageName: texera-workflow-compiling-service
service:
type: ClusterIP
port: 9090
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 256Mi

fileService:
name: file-service
numOfPods: 1
imageName: texera-file-service
service:
type: ClusterIP
port: 9092
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 512Mi

configService:
name: config-service
numOfPods: 1
imageName: texera-config-service
service:
type: ClusterIP
port: 9094
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 256Mi

accessControlService:
name: access-control-service
numOfPods: 1
imageName: texera-access-control-service
service:
type: ClusterIP
port: 9096
resources:
requests:
cpu: 10m
memory: 256Mi
limits:
cpu: 1000m
memory: 256Mi

# Headless service providing access to computing units
workflowComputingUnitPool:
createNamespaces: true
# The name of the workflow computing unit pool
name: texera-workflow-computing-unit
# Note: the namespace of the workflow computing unit pool might conflict when there are multiple texera deployments in the same cluster
namespace: texera-workflow-computing-unit-pool
# Max number of resources allocated for computing units
maxRequestedResources:
cpu: 100
memory: 100Gi
nvidiaGpu: 5
imageName: texera-workflow-execution-coordinator
service:
port: 8085
targetPort: 8085

texeraEnvVars:
- name: USER_SYS_ADMIN_USERNAME
value: "texera"
- name: USER_SYS_ADMIN_PASSWORD
value: "texera"
- name: STORAGE_JDBC_USERNAME
value: "postgres" # quoted for consistency with the other env values; Kubernetes env values must be strings
- name: USER_SYS_ENABLED
value: "true"
- name: SCHEDULE_GENERATOR_ENABLE_COST_BASED_SCHEDULE_GENERATOR
value: "true"
- name: MAX_WORKFLOW_WEBSOCKET_REQUEST_PAYLOAD_SIZE_KB
value: "64"
- name: MAX_NUM_OF_RUNNING_COMPUTING_UNITS_PER_USER
value: "10"
- name: KUBERNETES_COMPUTING_UNIT_CPU_LIMIT_OPTIONS
value: "2"
- name: KUBERNETES_COMPUTING_UNIT_MEMORY_LIMIT_OPTIONS
value: "2Gi"
- name: KUBERNETES_COMPUTING_UNIT_GPU_LIMIT_OPTIONS
value: "0"
- name: COMPUTING_UNIT_LOCAL_ENABLED
value: "false"
- name: KUBERNETES_COMPUTING_UNIT_ENABLED
value: "true"
- name: KUBERNETES_IMAGE_PULL_POLICY
value: "IfNotPresent"
- name: GUI_WORKFLOW_WORKSPACE_PYTHON_LANGUAGE_SERVER_PORT
value: ""
- name: GUI_WORKFLOW_WORKSPACE_PRODUCTION_SHARED_EDITING_SERVER
value: "true"
- name: GUI_LOGIN_LOCAL_LOGIN
value: "true"
- name: GUI_LOGIN_GOOGLE_LOGIN
value: "true"
- name: GUI_DATASET_SINGLE_FILE_UPLOAD_MAXIMUM_SIZE_MB
value: "1024"
- name: GUI_WORKFLOW_WORKSPACE_EXPORT_EXECUTION_RESULT_ENABLED
value: "true"
- name: GUI_WORKFLOW_WORKSPACE_WORKFLOW_EXECUTIONS_TRACKING_ENABLED
value: "true"
- name: GUI_WORKFLOW_WORKSPACE_ASYNC_RENDERING_ENABLED
value: "true"
- name: COMPUTING_UNIT_SHARING_ENABLED
value: "true"
- name: USER_SYS_INVITE_ONLY
value: "true"
- name: USER_SYS_GOOGLE_CLIENT_ID
value: ""
- name: USER_SYS_GOOGLE_SMTP_GMAIL
value: ""
- name: USER_SYS_GOOGLE_SMTP_PASSWORD
value: ""
- name: USER_SYS_DOMAIN
value: ""

yWebsocketServer:
name: y-websocket-server
replicaCount: 1
image: texera/y-websocket-server:latest


pythonLanguageServer:
name: python-language-server
replicaCount: 1
image: texera/pylsp:latest
imagePullSecret: regcred
resources:
limits:
cpu: "100m"
memory: "100Mi"

# Metrics Server configuration
metrics-server:
enabled: true # set to false if metrics-server is already installed
args:
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --metric-resolution=15s
resources:
requests:
cpu: 200m
memory: 400Mi
rbac:
create: true
serviceAccount:
create: true
priorityClassName: system-cluster-critical

gatewayConfig:
# Routes are available at bin/k8s/templates/gateway-routes.yaml

# The hostname for the Gateway listener (HTTP/HTTPS).
# e.g., "texera.example.com"
hostname: ""

# The name of the cert-manager Issuer or ClusterIssuer to use for obtaining certificates.
# This requires cert-manager to be installed in the cluster.
# You can find available ClusterIssuers with: `kubectl get clusterissuers`
# You can find available Issuers with: `kubectl get issuers -A`
# e.g., "letsencrypt-prod"
issuer: ""

# The Kind of the issuer specified above. Can be "Issuer" or "ClusterIssuer".
# If you found it via `kubectl get clusterissuers`, use "ClusterIssuer".
# If you found it via `kubectl get issuers`, use "Issuer".
# defaults to "Issuer" if not specified.
issuerKind: "Issuer"

# The name of the Secret where the signed certificate should be stored.
# If empty, it defaults to "{{ .Release.Name }}-cert".
# e.g., "texera-tls"
tlsSecretName: ""

# Envoy Gateway Configuration
envoy-gateway:
config:
envoyGateway:
extensionApis:
enableBackend: true
enableEnvoyPatchPolicy: true
Loading