Startup Configuration

Overview

Configuration for the "workflow-controller" application.

Configuration Examples

Startup configuration via the CLI

spec:
  selector:
    matchLabels:
      app: argo-server
  template:
    metadata:
      labels:
        app: argo-server
    spec:
      containers:
      - args:
        - server
        - "--auth-mode=sso"
        - "--secure=false"
        env: []
        image: quay.io/argoproj/argocli:latest
        name: argo-server

Configuration via the ConfigMap

# This file describes the config settings available in the workflow controller configmap
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
  namespace: argo
data:
  # Since v2.7, the settings no longer need to be wrapped under a single "config: |" key.
  #
  # instanceID is a label selector to limit the controller's watch to a specific instance. It
  # contains an arbitrary value that is carried forward into its pod labels, under the key
  # workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This
  # enables a controller to only receive workflow and pod events that it is interested in,
  # in order to support multiple controllers in a single cluster, and ultimately allows the
  # controller itself to be bundled as part of a higher level application. If omitted, the
  # controller watches workflows and pods that *are not* labeled with an instance id.
  # Distinguishes which resources each instance watches when multiple Argo instances run in a single k8s cluster
  instanceID: my-ci-controller

  # Namespace is a label selector filter to limit the controller's watch to a specific namespace
  namespace: my-namespace

  # Parallelism limits the max total parallel workflows that can execute at the same time
  # (available since Argo v2.3). Controller must be restarted to take effect.
  parallelism: "10"

  # Limit the maximum number of incomplete workflows in a namespace.
  # Intended for cluster installs that are multi-tenancy environments, to prevent too many workflows in one
  # namespace impacting others.
  # >= v3.2
  namespaceParallelism: "10"

  # Globally limits the rate at which pods are created.
  # This is intended to mitigate flooding of the Kubernetes API server by workflows with a large amount of
  # parallel nodes.
  resourceRateLimit: |
    limit: 10
    burst: 1    

  # Whether or not to emit events on node completion. These can take up a lot of space in
  # k8s (typically etcd) resulting in errors when trying to create new events:
  # "Unable to create audit event: etcdserver: mvcc: database space exceeded"
  # This config item allows you to disable this.
  # (since v2.9)
  nodeEvents: |
    enabled: true    

  # Whether or not to emit events on workflow status changes. These can take up a lot of space in
  # k8s (typically etcd), see nodeEvents above.
  # This config item allows you to disable this.
  # (since v3.6)
  workflowEvents: |
    enabled: true    

  # uncomment following lines if the workflow controller runs in a different k8s cluster than the
  # workflow workloads, or needs to communicate with the k8s apiserver using an out-of-cluster
  # kubeconfig secret
  # kubeConfig:
  #   # name of the kubeconfig secret, may not be empty when kubeConfig specified
  #   secretName: kubeconfig-secret
  #   # key of the kubeconfig secret, may not be empty when kubeConfig specified
  #   secretKey: kubeconfig
  #   # mounting path of the kubeconfig secret, default to /kube/config
  #   mountPath: /kubeconfig/mount/path
  #   # volume name when mounting the secret, default to kubeconfig
  #   volumeName: kube-config-volume

  links: |
    # Adds a button to the workflow page. E.g. linking to your logging facility.
    - name: Example Workflow Link
      scope: workflow
      url: http://logging-facility?namespace=${metadata.namespace}&workflowName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    # Adds a button next to the pod. E.g. linking to your logging facility but for the pod only.
    - name: Example Pod Link
      scope: pod
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Pod Logs
      scope: pod-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Event Source Logs
      scope: event-source-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    - name: Sensor Logs
      scope: sensor-logs
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
    # Adds a button to the bottom right of every page to link to your organisation's help or chat.
    - name: Get help
      scope: chat
      url: http://my-chat
    # Adds a button to the top of workflow view to navigate to customized views.
    - name: Completed Workflows
      scope: workflow-list
      url: http://workflows?label=workflows.argoproj.io/completed=true    

  # Columns are custom columns that will be exposed in the Workflow List View.
  # (available since Argo v3.5)
  columns: |
    # Adds a column to the Workflow List View
    - # The name of this column, e.g., "Workflow Completed".
      name: Workflow Completed
      # The type of this column, "label" or "annotation".
      type: label
      # The key of the label or annotation, e.g., "workflows.argoproj.io/completed".
      key: workflows.argoproj.io/completed    

  # uncomment following lines if you want to change navigation bar background color
  # navColor: red

  # artifactRepository defines the default location to be used as the artifact repository for
  # container artifacts.
  # Repository where artifacts produced by workflow pods are stored
  artifactRepository: |
    # archiveLogs will archive the main container logs as an artifact
    # Whether each pod's main-container logs are archived to the repository
    archiveLogs: true

    s3:
      # Use the corresponding endpoint depending on your S3 provider:
      #   AWS: s3.amazonaws.com
      #   GCS: storage.googleapis.com
      #   Minio: my-minio-endpoint.default:9000
      endpoint: s3.amazonaws.com
      bucket: my-bucket
      region: us-west-2
      # insecure will disable TLS. Primarily used for minio installs not configured with TLS
      insecure: false
      # keyFormat is a format pattern to define how artifacts will be organized in a bucket.
      # It can reference workflow metadata variables such as workflow.namespace, workflow.name,
      # pod.name. Can also use strftime formatting of workflow.creationTimestamp so that workflow
      # artifacts can be organized by date. If omitted, will use `{{workflow.name}}/{{pod.name}}`,
      # which has the potential for collisions.
      # The following example pattern organizes workflow artifacts under a "my-artifacts" sub dir,
      # then sub dirs for year, month, date and finally workflow name and pod.
      # e.g.: my-artifacts/2018/08/23/my-workflow-abc123/my-workflow-abc123-1234567890
      keyFormat: "my-artifacts\
        /{{workflow.creationTimestamp.Y}}\
        /{{workflow.creationTimestamp.m}}\
        /{{workflow.creationTimestamp.d}}\
        /{{workflow.name}}\
        /{{pod.name}}"
      # The actual secret object (in this example my-s3-credentials), should be created in every
      # namespace where a workflow needs to store its artifacts to S3. If omitted,
      # attempts to use IAM role to access the bucket (instead of accessKey/secretKey).
      accessKeySecret:
        name: my-s3-credentials
        key: accessKey
      secretKeySecret:
        name: my-s3-credentials
        key: secretKey
      # If this is set to true, argo workflows will use AWS SDK default credentials provider chain. This will allow things like
      # IRSA and any of the authentication methods that the golang SDK uses in its default chain.
      # If you are using IRSA on AWS, and set this option to true, you will also need to modify Argo-Server Deployment with
      # `spec.template.spec.securityContext.fsGroup: 65534` configuration. This is required for IRSA to be able to access
      # `/var/run/secrets/eks.amazonaws.com/serviceaccount/token` file, and authenticate with AWS.
      useSDKCreds: false

      encryptionOptions:
        # If this is set to true, SSE-S3 encryption will be used to store objects
        # unless kmsKeyId or serverSideCustomerKeySecret is set
        enableEncryption: false
        # A valid kms key id. If this value is set, the object stored in s3 will be encrypted with SSE-KMS
        # Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
        # kmsKeyId: ''
        # Allows you to set a json blob of simple key value pairs. See
        # https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
        # for more information
        # kmsEncryptionContext: ''
        # The actual secret object (in this example my-s3-credentials),
        # should be created when using a custom secret to encrypt objects in using SSE-C
        # Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
        # serverSideCustomerKeySecret:
        #  name: my-s3-credentials
        #  key: secretKey    

  # The command/args for each image, needed when the command is not specified and the emissary executor is used.
  # https://argo-workflows.readthedocs.io/en/latest/workflow-executors/#emissary-emissary
  images: |
    argoproj/argosay:v2:
      cmd: [/argosay]
    docker/whalesay:latest:
      cmd: [/bin/bash]    

  # Defaults for main containers. These can be overridden by the template.
  # <= v3.3 only `resources` are supported.
  # >= v3.4 all fields are supported, including security context.
  mainContainer: |
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 0.1
        memory: 64Mi
      limits:
        cpu: 0.5
        memory: 512Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 1000    

  # executor controls how the init and wait container should be customized
  # (available since Argo v2.3)
  executor: |
    # Customizes the executor container that runs during the pod init phase
    # image: quay.io/argoproj/argoexec:v3.5.7
    image: registry.cn-hangzhou.aliyuncs.com/kube-image-repo/argoexec:v3.5.7
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 0.1
        memory: 64Mi
      limits:
        cpu: 0.5
        memory: 512Mi
    # args & env allows command line arguments and environment variables to be appended to the
    # executor container and is mainly used for development/debugging purposes.
    args:
    - --loglevel
    - debug
    - --gloglevel
    - "6"
    env:
    # ARGO_TRACE enables some tracing information for debugging purposes. Currently it enables
    # logging of S3 request/response payloads (including auth headers)
    - name: ARGO_TRACE
      value: "1"    

  # metricsConfig controls the path and port for prometheus metrics. Metrics are enabled and emitted on localhost:9090/metrics
  # by default.
  metricsConfig: |
    # Enabled controls metric emission. Default is true, set "enabled: false" to turn off
    enabled: true
    # Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics"
    path: /metrics
    # Port is the port where metrics are emitted. Default is "9090"
    port: 8080
    # MetricsTTL sets how often custom metrics are cleared from memory. Default is "0", metrics are never cleared. Histogram metrics are never cleared.
    metricsTTL: "10m"
    # IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors. Default is "false"
    ignoreErrors: false
    # Use a self-signed cert for TLS
    # >= 3.6: default true
    secure: true
    # Options for configuring individual metrics
    options:
      pod_missing:
        disable: true
      cronworkflows_triggered_total:
        disabledAttributes:
          - name
      k8s_request_duration:
        histogramBuckets: [ 1.0, 2.0, 10.0 ]
    # >= 3.6. Which temporality to use for OpenTelemetry. Default is "Cumulative"
    temporality: Delta

    # DEPRECATED: Legacy metrics are now removed, this field is ignored
    disableLegacy: false    

  # telemetryConfig controls the path and port for prometheus telemetry. Telemetry is enabled and emitted in the same endpoint
  # as metrics by default, but can be overridden using this config.
  telemetryConfig: |
    enabled: true
    path: /telemetry
    port: 8080
    secure: true  # Use a self-signed cert for TLS, default false    

  # enable persistence using postgres
  persistence: |
    connectionPool:
      maxIdleConns: 100
      maxOpenConns: 0
      connMaxLifetime: 0s # 0 means connections don't have a max lifetime
    #  if true node status is only saved to the persistence DB to avoid the 1MB limit in etcd
    nodeStatusOffLoad: false
    # save completed workloads to the workflow archive
    archive: false
    # the number of days to keep archived workflows (the default is forever)
    archiveTTL: 180d
    # skip database migration if needed.
    # skipMigration: true

    # LabelSelector determines which workflows will be archived: only those matching the matchLabels or matchExpressions.
    # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
    archiveLabelSelector:
      matchLabels:
        workflows.argoproj.io/archive-strategy: "always"

    # Optional name of the cluster I'm running in. This must be unique for your cluster.
    clusterName: default
    postgresql:
      host: localhost
      port: 5432
      database: postgres
      tableName: argo_workflows
      # the database secrets must be in the same namespace of the controller
      userNameSecret:
        name: argo-postgres-config
        key: username
      passwordSecret:
        name: argo-postgres-config
        key: password
      ssl: true
      # sslMode must be one of: disable, require, verify-ca, verify-full
      # you can find more information about those ssl options here: https://godoc.org/github.com/lib/pq
      sslMode: require

    # Optional config for mysql:
    # mysql:
    #   host: localhost
    #   port: 3306
    #   database: argo
    #   tableName: argo_workflows
    #   userNameSecret:
    #     name: argo-mysql-config
    #     key: username
    #   passwordSecret:
    #     name: argo-mysql-config
    #     key: password    

  # PodSpecLogStrategy enables the logging of pod specs in the controller log.
  # podSpecLogStrategy: |
  #   failedPod: true
  #   allPods: false

  # PodGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed.
  # Value must be non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately.
  # Defaults to the Kubernetes default of 30 seconds.
  podGCGracePeriodSeconds: "60"

  # PodGCDeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
  # Value must be non-negative. A zero value indicates that the pods will be deleted immediately.
  # Defaults to 5 seconds.
  podGCDeleteDelayDuration: 30s

  # adds initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC.
  # initialDelay: 5s

  # Workflow retention by number of workflows
  # retentionPolicy: |
  #   completed: 10
  #   failed: 3
  #   errored: 3

  # Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level
  # See more: docs/default-workflow-specs.md
  workflowDefaults: |
    metadata:
      annotations:
        argo: workflows
      labels:
        foo: bar
    spec:
      ttlStrategy:
        secondsAfterSuccess: 5
      parallelism: 3    

  # SSO Configuration for the Argo server.
  # You must also start argo server with `--auth-mode sso`.
  # https://argo-workflows.readthedocs.io/en/latest/argo-server-auth-mode/
  sso: |
    # This is the root URL of the OIDC provider (required).
    issuer: https://issuer.root.url/
    # Some OIDC providers have alternate root URLs that can be included. These should be reviewed carefully. (optional)
    issuerAlias: https://altissuer.root.url
    # This defines how long your login is valid for (in hours). (optional)
    # If omitted, defaults to 10h. Example below is 10 days.
    sessionExpiry: 240h
    # This is name of the secret and the key in it that contain OIDC client
    # ID issued to the application by the provider (required).
    clientId:
      name: client-id-secret
      key: client-id-key
    # This is name of the secret and the key in it that contain OIDC client
    # secret issued to the application by the provider (required).
    clientSecret:
      name: client-secret-secret
      key: client-secret-key
    # This is the redirect URL supplied to the provider (optional). It must
    # be in the form <argo-server-root-url>/oauth2/callback. It must be
    # browser-accessible. If omitted, will be automatically generated.
    redirectUrl: https://argo-server/oauth2/callback
    # Additional scopes to request. Typically needed for SSO RBAC. >= v2.12
    scopes:
     - groups
     - email
     - profile
    # RBAC Config. >= v2.12
    rbac:
      enabled: false
    # Skip TLS verify, not recommended in production environments. Useful for testing purposes. >= v3.2.4
    insecureSkipVerify: false    

  # workflowRestrictions restricts the Workflows that the controller will process.
  # Current options:
  #   Strict: Only Workflows using "workflowTemplateRef" will be processed. This allows the administrator of the controller
  #     to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution.
  #   Secure: Only Workflows using "workflowTemplateRef" will be processed and the controller will enforce
  #     that the WorkflowTemplate that is referenced hasn't changed between operations. If you want to make sure the operator of the
  #     Workflow cannot run an arbitrary Workflow, use this option.
  workflowRestrictions: |
    templateReferencing: Strict    

Data Structures

github.com/argoproj/argo-workflows/config/config.go

Config

// Config contains the configuration settings for the workflow controller
type Config struct {

    // NodeEvents configures how node events are emitted
    NodeEvents NodeEvents `json:"nodeEvents,omitempty"`

    // WorkflowEvents configures how workflow events are emitted
    WorkflowEvents WorkflowEvents `json:"workflowEvents,omitempty"`

    // Executor holds container customizations for the executor to use when running pods
    Executor *apiv1.Container `json:"executor,omitempty"`

    // MainContainer holds container customization for the main container
    MainContainer *apiv1.Container `json:"mainContainer,omitempty"`

    // KubeConfig specifies a kube config file for the wait & init containers
    KubeConfig *KubeConfig `json:"kubeConfig,omitempty"`

    // ArtifactRepository contains the default location of an artifact repository for container artifacts
    ArtifactRepository wfv1.ArtifactRepository `json:"artifactRepository,omitempty"`

    // Namespace is a label selector filter to limit the controller's watch to a specific namespace
    Namespace string `json:"namespace,omitempty"`

    // InstanceID is a label selector to limit the controller's watch to a specific instance. It
    // contains an arbitrary value that is carried forward into its pod labels, under the key
    // workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This
    // enables a controller to only receive workflow and pod events that it is interested in,
    // in order to support multiple controllers in a single cluster, and ultimately allows the
    // controller itself to be bundled as part of a higher level application. If omitted, the
    // controller watches workflows and pods that *are not* labeled with an instance id.
    InstanceID string `json:"instanceID,omitempty"`

    // MetricsConfig specifies configuration for metrics emission. Metrics are enabled and emitted on localhost:9090/metrics
    // by default.
    MetricsConfig MetricsConfig `json:"metricsConfig,omitempty"`

    // TelemetryConfig specifies configuration for telemetry emission. Telemetry is enabled and emitted in the same endpoint
    // as metrics by default, but can be overridden using this config.
    TelemetryConfig MetricsConfig `json:"telemetryConfig,omitempty"`

    // Parallelism limits the max total parallel workflows that can execute at the same time
    Parallelism int `json:"parallelism,omitempty"`

    // NamespaceParallelism limits the max workflows that can execute at the same time in a namespace
    NamespaceParallelism int `json:"namespaceParallelism,omitempty"`

    // ResourceRateLimit limits the rate at which pods are created
    ResourceRateLimit *ResourceRateLimit `json:"resourceRateLimit,omitempty"`

    // Persistence contains the workflow persistence DB configuration
    Persistence *PersistConfig `json:"persistence,omitempty"`

    // Links to related apps.
    Links []*wfv1.Link `json:"links,omitempty"`

    // Columns are custom columns that will be exposed in the Workflow List View.
    Columns []*wfv1.Column `json:"columns,omitempty"`

    // WorkflowDefaults are values that will apply to all Workflows from this controller, unless overridden on the Workflow-level
    WorkflowDefaults *wfv1.Workflow `json:"workflowDefaults,omitempty"`

    // PodSpecLogStrategy enables logging of the pod spec in the controller log.
    PodSpecLogStrategy PodSpecLogStrategy `json:"podSpecLogStrategy,omitempty"`

    // PodGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed.
    // Value must be non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately.
    // Defaults to the Kubernetes default of 30 seconds.
    PodGCGracePeriodSeconds *int64 `json:"podGCGracePeriodSeconds,omitempty"`

    // PodGCDeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
    // Value must be non-negative. A zero value indicates that the pods will be deleted immediately.
    // Defaults to 5 seconds.
    PodGCDeleteDelayDuration *metav1.Duration `json:"podGCDeleteDelayDuration,omitempty"`

    // WorkflowRestrictions restricts the controller to executing Workflows that meet certain restrictions
    WorkflowRestrictions *WorkflowRestrictions `json:"workflowRestrictions,omitempty"`

    // Adds configurable initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC.
    InitialDelay metav1.Duration `json:"initialDelay,omitempty"`

    // The command/args for each image, needed when the command is not specified and the emissary executor is used.
    // https://argo-workflows.readthedocs.io/en/latest/workflow-executors/#emissary-emissary
    Images map[string]Image `json:"images,omitempty"`

    // Workflow retention by number of workflows
    RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"`

    // NavColor is a UI navigation bar background color
    NavColor string `json:"navColor,omitempty"`

    // SSO holds the settings for single sign-on
    SSO SSOConfig `json:"sso,omitempty"`
}

The following code converts the ConfigMap settings into the Config structure above:

func parseConfigMap(cm *apiv1.ConfigMap, config *Config) error {
    // The key in the configmap to retrieve workflow configuration from.
    // Content encoding is expected to be YAML.
    rawConfig, ok := cm.Data["config"]
    if ok && len(cm.Data) != 1 {
        return fmt.Errorf("if you have an item in your config map named 'config', you must only have one item")
    }
    if !ok {
        for name, value := range cm.Data {
            if strings.Contains(value, "\n") {
                // this mucky code indents with two spaces
                rawConfig = rawConfig + name + ":\n  " + strings.Join(strings.Split(strings.Trim(value, "\n"), "\n"), "\n  ") + "\n"
            } else {
                rawConfig = rawConfig + name + ": " + value + "\n"
            }
        }
    }
    err := yaml.UnmarshalStrict([]byte(rawConfig), config)
    return err
}
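
To make the flattening step concrete, here is a minimal, runnable sketch that mirrors the loop above, using a plain map[string]string in place of *apiv1.ConfigMap (the flatten helper is hypothetical, not part of Argo):

package main

import (
    "fmt"
    "strings"
)

// flatten mirrors the loop in parseConfigMap: single-line values become
// "key: value", multi-line values become a key with a two-space-indented block.
func flatten(data map[string]string) string {
    var raw strings.Builder
    for name, value := range data { // note: map iteration order is random
        if strings.Contains(value, "\n") {
            raw.WriteString(name + ":\n  " +
                strings.Join(strings.Split(strings.Trim(value, "\n"), "\n"), "\n  ") + "\n")
        } else {
            raw.WriteString(name + ": " + value + "\n")
        }
    }
    return raw.String()
}

func main() {
    // Two entries shaped like the ConfigMap example above.
    fmt.Print(flatten(map[string]string{
        "parallelism":       "10",
        "resourceRateLimit": "limit: 10\nburst: 1",
    }))
}

The result is one YAML document, which is then unmarshalled strictly into Config; this is why a ConfigMap may either carry one "config" key or many top-level keys, but not both.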

NodeEvents

type NodeEvents struct {
    Enabled   *bool `json:"enabled,omitempty"`
    SendAsPod bool  `json:"sendAsPod,omitempty"`
}

WorkflowEvents

type WorkflowEvents struct {
    Enabled *bool `json:"enabled,omitempty"`
}

*apiv1.Container

Pod Container

KubeConfig

// KubeConfig is used for the wait & init sidecar containers to communicate with the k8s apiserver by an out-of-cluster method;
// it is used when the workflow controller is in a different cluster than the workflow workloads
type KubeConfig struct {
    // SecretName of the kubeconfig secret
    // may not be empty if kubeConfig specified
    SecretName string `json:"secretName"`
    // SecretKey of the kubeconfig in the secret
    // may not be empty if kubeConfig specified
    SecretKey string `json:"secretKey"`
    // VolumeName of kubeconfig, default to 'kubeconfig'
    VolumeName string `json:"volumeName,omitempty"`
    // MountPath of the kubeconfig secret, default to '/kube/config'
    MountPath string `json:"mountPath,omitempty"`
}

wfv1.ArtifactRepository

TODO;

MetricsConfig

type MetricsTemporality string

const (
    MetricsTemporalityCumulative MetricsTemporality = "Cumulative"
    MetricsTemporalityDelta      MetricsTemporality = "Delta"
)

// MetricsConfig defines a config for a metrics server
type MetricsConfig struct {
    // Enabled controls metric emission. Default is true, set "enabled: false" to turn off
    Enabled *bool `json:"enabled,omitempty"`
    // DisableLegacy turns off legacy metrics
    // DEPRECATED: Legacy metrics are now removed, this field is ignored
    DisableLegacy bool `json:"disableLegacy,omitempty"`
    // MetricsTTL sets how often custom metrics are cleared from memory
    MetricsTTL TTL `json:"metricsTTL,omitempty"`
    // Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics"
    Path string `json:"path,omitempty"`
    // Port is the port where metrics are emitted. Default is "9090"
    Port int `json:"port,omitempty"`
    // IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors
    IgnoreErrors bool `json:"ignoreErrors,omitempty"`
    // Secure is a flag that starts the metrics servers using TLS, defaults to true
    Secure *bool `json:"secure,omitempty"`
    // Modifiers configure metrics by name
    Modifiers map[string]MetricModifier `json:"modifiers,omitempty"`
    // Temporality of the OpenTelemetry metrics.
    // Enum of Cumulative or Delta, defaulting to Cumulative.
    // No effect on Prometheus metrics, which are always Cumulative.
    Temporality MetricsTemporality `json:"temporality,omitempty"`
}
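
Since secure defaults to true (>= 3.6), the endpoint serves TLS with a self-signed certificate, so a hand-rolled scrape must skip verification. A sketch, assuming the default localhost:9090/metrics (the example ConfigMap earlier overrides the port to 8080):

package main

import (
    "crypto/tls"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Skip verification only because the cert is self-signed; in production
    // you would trust the certificate instead.
    client := &http.Client{Transport: &http.Transport{
        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    }}
    resp, err := client.Get("https://localhost:9090/metrics")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("%.300s\n", body) // first few lines of the Prometheus text format
}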

ResourceRateLimit

type ResourceRateLimit struct {
    Limit float64 `json:"limit"`
    Burst int     `json:"burst"`
}
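
Limit and Burst describe a token bucket. The exact limiter the controller uses internally is not shown here, but golang.org/x/time/rate illustrates what limit: 10 / burst: 1 from the example ConfigMap means:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    // limit: 10, burst: 1 — a sustained rate of 10 pod creations per second,
    // with no allowance for bursts above that rate.
    limiter := rate.NewLimiter(rate.Limit(10), 1)

    start := time.Now()
    for i := 0; i < 5; i++ {
        _ = limiter.Wait(context.Background()) // blocks ~100ms between "creations"
        fmt.Printf("pod %d created at %v\n", i, time.Since(start).Round(time.Millisecond))
    }
}

A larger burst would let that many pods be created back-to-back before the sustained rate kicks in.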

PersistConfig

type PersistConfig struct {
    NodeStatusOffload bool `json:"nodeStatusOffLoad,omitempty"`
    // Archive workflows to persistence.
    Archive bool `json:"archive,omitempty"`
    // ArchiveLabelSelector holds the LabelSelector used to decide which workflows are archived.
    ArchiveLabelSelector *metav1.LabelSelector `json:"archiveLabelSelector,omitempty"`
    // in days
    ArchiveTTL     TTL               `json:"archiveTTL,omitempty"`
    ClusterName    string            `json:"clusterName,omitempty"`
    ConnectionPool *ConnectionPool   `json:"connectionPool,omitempty"`
    PostgreSQL     *PostgreSQLConfig `json:"postgresql,omitempty"`
    MySQL          *MySQLConfig      `json:"mysql,omitempty"`
    SkipMigration  bool              `json:"skipMigration,omitempty"`
}

TODO;
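
Note that archiveTTL values such as 180d use a day suffix that Go's time.ParseDuration rejects; the TTL type extends plain durations with day support. A hypothetical parser sketch showing the idea (Argo's actual TTL implementation may differ):

package main

import (
    "fmt"
    "strconv"
    "strings"
    "time"
)

// parseTTL handles a trailing "d" (days) itself and defers everything else
// to time.ParseDuration. This helper is illustrative, not Argo's code.
func parseTTL(s string) (time.Duration, error) {
    if strings.HasSuffix(s, "d") {
        days, err := strconv.Atoi(strings.TrimSuffix(s, "d"))
        if err != nil {
            return 0, err
        }
        return time.Duration(days) * 24 * time.Hour, nil
    }
    return time.ParseDuration(s)
}

func main() {
    for _, s := range []string{"180d", "10m"} {
        d, err := parseTTL(s)
        fmt.Println(s, "=>", d, err)
    }
}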

wfv1.Column

TODO;

wfv1.Workflow

Workflow
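
WorkflowDefaults are merged underneath each submitted Workflow, with the Workflow winning on conflicts. Whether the controller uses an RFC 7386 JSON merge patch internally is an assumption, but the following sketch reproduces the documented override semantics:

package main

import (
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
)

func main() {
    // workflowDefaults from the ConfigMap example, as JSON.
    defaults := []byte(`{"spec":{"ttlStrategy":{"secondsAfterSuccess":5},"parallelism":3}}`)
    // A submitted Workflow that overrides one of the defaults.
    workflow := []byte(`{"spec":{"ttlStrategy":{"secondsAfterSuccess":60}}}`)

    // Merge the Workflow over the defaults: the Workflow wins where both set
    // a value; parallelism: 3 is inherited.
    effective, err := jsonpatch.MergePatch(defaults, workflow)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(effective))
}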

PodSpecLogStrategy

// PodSpecLogStrategy contains the configuration for logging the pod spec in controller log for debugging purpose
type PodSpecLogStrategy struct {
    FailedPod bool `json:"failedPod,omitempty"`
    AllPods   bool `json:"allPods,omitempty"`
}

WorkflowRestrictions

type WorkflowRestrictions struct {
    TemplateReferencing TemplateReferencing `json:"templateReferencing,omitempty"`
}

type TemplateReferencing string

const (
    TemplateReferencingStrict TemplateReferencing = "Strict"
    TemplateReferencingSecure TemplateReferencing = "Secure"
)
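
A sketch of the gate these values imply (the Secure variant's check that the referenced template is unchanged is omitted; allowed is a hypothetical helper, not Argo's actual function):

package main

import "fmt"

type TemplateReferencing string

const (
    TemplateReferencingStrict TemplateReferencing = "Strict"
    TemplateReferencingSecure TemplateReferencing = "Secure"
)

// allowed: under Strict or Secure, only Workflows that use
// workflowTemplateRef are processed.
func allowed(r TemplateReferencing, usesTemplateRef bool) bool {
    switch r {
    case TemplateReferencingStrict, TemplateReferencingSecure:
        return usesTemplateRef
    default: // no restriction configured
        return true
    }
}

func main() {
    fmt.Println(allowed(TemplateReferencingStrict, false)) // false: rejected
    fmt.Println(allowed(TemplateReferencingStrict, true))  // true: processed
}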

Image

type Image struct {
    Entrypoint []string `json:"entrypoint,omitempty"`
    Cmd        []string `json:"cmd,omitempty"`
}

RetentionPolicy

// Workflow retention by number of workflows
type RetentionPolicy struct {
    Completed int `json:"completed,omitempty"`
    Failed    int `json:"failed,omitempty"`
    Errored   int `json:"errored,omitempty"`
}

SSOConfig

type SSOConfig struct {
    Issuer       string                  `json:"issuer"`
    IssuerAlias  string                  `json:"issuerAlias,omitempty"`
    ClientID     apiv1.SecretKeySelector `json:"clientId"`
    ClientSecret apiv1.SecretKeySelector `json:"clientSecret"`
    RedirectURL  string                  `json:"redirectUrl"`
    RBAC         *RBACConfig             `json:"rbac,omitempty"`
    // additional scopes (on top of "openid")
    Scopes        []string        `json:"scopes,omitempty"`
    SessionExpiry metav1.Duration `json:"sessionExpiry,omitempty"`
    // customGroupClaimName will override the groups claim name
    CustomGroupClaimName string   `json:"customGroupClaimName,omitempty"`
    UserInfoPath         string   `json:"userInfoPath,omitempty"`
    InsecureSkipVerify   bool     `json:"insecureSkipVerify,omitempty"`
    FilterGroupsRegex    []string `json:"filterGroupsRegex,omitempty"`
}
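
FilterGroupsRegex prunes the OIDC groups claim down to matching entries before RBAC evaluation. A sketch of that filtering (the exact matching rules are an assumption; the group names and patterns are illustrative):

package main

import (
    "fmt"
    "regexp"
)

// filterGroups keeps only the groups matching at least one configured pattern.
func filterGroups(groups, patterns []string) []string {
    var kept []string
    for _, g := range groups {
        for _, p := range patterns {
            if matched, _ := regexp.MatchString(p, g); matched {
                kept = append(kept, g)
                break
            }
        }
    }
    return kept
}

func main() {
    groups := []string{"argo-admins", "argo-devs", "unrelated-team"}
    fmt.Println(filterGroups(groups, []string{"^argo-.*"}))
    // [argo-admins argo-devs]
}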

RBACConfig

type RBACConfig struct {
    Enabled bool `json:"enabled,omitempty"`
}


