From 8311dd9e816dfc4970ab62eec0b09e7eb757f242 Mon Sep 17 00:00:00 2001 From: Mangirdas Judeikis Date: Tue, 11 Nov 2025 13:53:11 +0200 Subject: [PATCH 1/2] Add production deployment documentation Signed-off-by: Mangirdas Judeikis On-behalf-of: @SAP mangirdas.judeikis@sap.com --- .gitignore | 3 + contrib/production/README.md | 19 + .../cert-manager/certificate-example.yaml | 18 + .../cloudflare-secret.yaml.template | 12 + .../cert-manager/cluster-issuer.yaml.template | 20 + .../etcd-druid/certificate-etcd-issuer.yaml | 30 + ...copybackupstasks.druid.gardener.cloud.yaml | 205 ++ .../etcds.druid.gardener.cloud.yaml | 2123 +++++++++++++++++ .../kcp-comer/certificate-etcd.yaml | 144 ++ .../production/kcp-comer/certificate-kcp.yaml | 30 + .../kcp-comer/etcd-druid-alpha.yaml | 67 + .../production/kcp-comer/etcd-druid-root.yaml | 67 + .../production/kcp-comer/kcp-alpha-shard.yaml | 35 + .../kcp-comer/kcp-front-proxy-internal.yaml | 32 + .../production/kcp-comer/kcp-front-proxy.yaml | 34 + .../production/kcp-comer/kcp-root-shard.yaml | 48 + .../kubeconfig-kcp-admin-internal.yaml | 15 + .../kcp-comer/kubeconfig-kcp-admin.yaml | 15 + .../kcp-dekker/certificate-etcd.yaml | 144 ++ .../kcp-dekker/certificate-kcp.yaml | 30 + .../kcp-dekker/etcd-druid-alpha.yaml | 67 + .../kcp-dekker/etcd-druid-root.yaml | 67 + .../kcp-dekker/kcp-alpha-shard.yaml | 26 + .../kcp-dekker/kcp-front-proxy.yaml | 32 + .../production/kcp-dekker/kcp-root-shard.yaml | 37 + .../kcp-dekker/kubeconfig-kcp-admin.yaml | 15 + .../kcp-vespucci/certificate-etcd.yaml | 144 ++ .../kcp-vespucci/certificate-kcp.yaml | 30 + .../kcp-vespucci/etcd-druid-alpha.yaml | 67 + .../kcp-vespucci/etcd-druid-root.yaml | 67 + .../kcp-vespucci/kcp-alpha-shard.yaml | 37 + .../kcp-vespucci/kcp-front-proxy.yaml | 46 + .../kcp-vespucci/kcp-root-shard.yaml | 48 + .../kcp-vespucci/kubeconfig-kcp-admin.yaml | 15 + .../production/oidc-dex/certificate-dns.yaml | 17 + .../production/oidc-dex/postgres-cluster.yaml | 39 + 
.../oidc-dex/postgres-database.yaml | 10 + .../production/oidc-dex/values.yaml.template | 68 + .../governance/general-technical-review.md | 2 +- docs/content/contributing/index.md | 2 +- docs/content/setup/.pages | 3 +- docs/content/setup/production/.pages | 8 + docs/content/setup/production/dual-proxy.svg | 5 + docs/content/setup/production/high-level.svg | 5 + .../{production.md => production/index.md} | 63 +- docs/content/setup/production/kcp-comer.md | 179 ++ docs/content/setup/production/kcp-dekker.md | 159 ++ .../setup/production/kcp-deployment-modes.svg | 4 + docs/content/setup/production/kcp-vespucci.md | 164 ++ docs/content/setup/production/overview.md | 136 ++ .../content/setup/production/prerequisites.md | 211 ++ docs/content/setup/production/public.svg | 4 + docs/content/setup/production/self-signed.svg | 4 + docs/scripts/serve-docs.sh | 6 +- 54 files changed, 4871 insertions(+), 7 deletions(-) create mode 100644 contrib/production/README.md create mode 100644 contrib/production/cert-manager/certificate-example.yaml create mode 100644 contrib/production/cert-manager/cloudflare-secret.yaml.template create mode 100644 contrib/production/cert-manager/cluster-issuer.yaml.template create mode 100644 contrib/production/etcd-druid/certificate-etcd-issuer.yaml create mode 100644 contrib/production/etcd-druid/etcdcopybackupstasks.druid.gardener.cloud.yaml create mode 100644 contrib/production/etcd-druid/etcds.druid.gardener.cloud.yaml create mode 100644 contrib/production/kcp-comer/certificate-etcd.yaml create mode 100644 contrib/production/kcp-comer/certificate-kcp.yaml create mode 100644 contrib/production/kcp-comer/etcd-druid-alpha.yaml create mode 100644 contrib/production/kcp-comer/etcd-druid-root.yaml create mode 100644 contrib/production/kcp-comer/kcp-alpha-shard.yaml create mode 100644 contrib/production/kcp-comer/kcp-front-proxy-internal.yaml create mode 100644 contrib/production/kcp-comer/kcp-front-proxy.yaml create mode 100644 
contrib/production/kcp-comer/kcp-root-shard.yaml create mode 100644 contrib/production/kcp-comer/kubeconfig-kcp-admin-internal.yaml create mode 100644 contrib/production/kcp-comer/kubeconfig-kcp-admin.yaml create mode 100644 contrib/production/kcp-dekker/certificate-etcd.yaml create mode 100644 contrib/production/kcp-dekker/certificate-kcp.yaml create mode 100644 contrib/production/kcp-dekker/etcd-druid-alpha.yaml create mode 100644 contrib/production/kcp-dekker/etcd-druid-root.yaml create mode 100644 contrib/production/kcp-dekker/kcp-alpha-shard.yaml create mode 100644 contrib/production/kcp-dekker/kcp-front-proxy.yaml create mode 100644 contrib/production/kcp-dekker/kcp-root-shard.yaml create mode 100644 contrib/production/kcp-dekker/kubeconfig-kcp-admin.yaml create mode 100644 contrib/production/kcp-vespucci/certificate-etcd.yaml create mode 100644 contrib/production/kcp-vespucci/certificate-kcp.yaml create mode 100644 contrib/production/kcp-vespucci/etcd-druid-alpha.yaml create mode 100644 contrib/production/kcp-vespucci/etcd-druid-root.yaml create mode 100644 contrib/production/kcp-vespucci/kcp-alpha-shard.yaml create mode 100644 contrib/production/kcp-vespucci/kcp-front-proxy.yaml create mode 100644 contrib/production/kcp-vespucci/kcp-root-shard.yaml create mode 100644 contrib/production/kcp-vespucci/kubeconfig-kcp-admin.yaml create mode 100644 contrib/production/oidc-dex/certificate-dns.yaml create mode 100644 contrib/production/oidc-dex/postgres-cluster.yaml create mode 100644 contrib/production/oidc-dex/postgres-database.yaml create mode 100644 contrib/production/oidc-dex/values.yaml.template create mode 100644 docs/content/setup/production/.pages create mode 100644 docs/content/setup/production/dual-proxy.svg create mode 100644 docs/content/setup/production/high-level.svg rename docs/content/setup/{production.md => production/index.md} (63%) create mode 100644 docs/content/setup/production/kcp-comer.md create mode 100644 
docs/content/setup/production/kcp-dekker.md create mode 100644 docs/content/setup/production/kcp-deployment-modes.svg create mode 100644 docs/content/setup/production/kcp-vespucci.md create mode 100644 docs/content/setup/production/overview.md create mode 100644 docs/content/setup/production/prerequisites.md create mode 100644 docs/content/setup/production/public.svg create mode 100644 docs/content/setup/production/self-signed.svg diff --git a/.gitignore b/.gitignore index 2c29279ec2f..3991d00c7b3 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,6 @@ dex/ *.pem index.html +contrib/production/cert-manager/cloudflare-secret.yaml +contrib/production/cert-manager/cluster-issuer.yaml +contrib/production/oidc-dex/values.yaml \ No newline at end of file diff --git a/contrib/production/README.md b/contrib/production/README.md new file mode 100644 index 00000000000..fde416722d9 --- /dev/null +++ b/contrib/production/README.md @@ -0,0 +1,19 @@ +# Production Deployment Assets + +This directory contains assets and configuration files for production deployment of kcp. + +!!! Note: We understand that maintaining static assets in the repository can be challenging. If you notice any discrepancies between these assets and the latest version of kcp, please open an issue or submit a pull request to help us keep them up to date. + +## Usage + +These assets are referenced by the production deployment documentation in `docs/content/setup/production/`. + +Each deployment type (dekker, vespucci, comer) has its own subdirectory with complete configuration files and deployment manifests. + +## Deployment Types + +- **kcp-dekker**: Self-signed certificates, simple single-cluster deployment +- **kcp-vespucci**: External certificates with Let's Encrypt, public shard access +- **kcp-comer**: CDN integration with dual front-proxy configuration + +See the corresponding documentation in `docs/content/setup/production/` for detailed deployment instructions. 
\ No newline at end of file diff --git a/contrib/production/cert-manager/certificate-example.yaml b/contrib/production/cert-manager/certificate-example.yaml new file mode 100644 index 00000000000..a8a86b7b381 --- /dev/null +++ b/contrib/production/cert-manager/certificate-example.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: dex-tls-cert + namespace: oidc +spec: + # Secret where the certificate will be stored + secretName: dex-tls + + # Reference to the ClusterIssuer + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + + # Domains for the certificate + dnsNames: + - auth.example.com \ No newline at end of file diff --git a/contrib/production/cert-manager/cloudflare-secret.yaml.template b/contrib/production/cert-manager/cloudflare-secret.yaml.template new file mode 100644 index 00000000000..ba86d5140c2 --- /dev/null +++ b/contrib/production/cert-manager/cloudflare-secret.yaml.template @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudflare-api-key-secret + namespace: cert-manager +type: Opaque +data: + # Replace with your base64 encoded Cloudflare Global API Key + # Get your API key from: https://dash.cloudflare.com/profile/api-tokens + # Then encode it: echo -n "xxxxxxxxxxxxx" | base64 + api-key: xxxxxxxxxxxx \ No newline at end of file diff --git a/contrib/production/cert-manager/cluster-issuer.yaml.template b/contrib/production/cert-manager/cluster-issuer.yaml.template new file mode 100644 index 00000000000..364324f18e3 --- /dev/null +++ b/contrib/production/cert-manager/cluster-issuer.yaml.template @@ -0,0 +1,20 @@ +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + # You must replace this email address with your own. + email: email@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource that will be used to store the account's private key. 
+ name: le-issuer-account-key + solvers: + - dns01: + cloudflare: + email: email@example.com + apiKeySecretRef: + name: cloudflare-api-key-secret + key: api-key \ No newline at end of file diff --git a/contrib/production/etcd-druid/certificate-etcd-issuer.yaml b/contrib/production/etcd-druid/certificate-etcd-issuer.yaml new file mode 100644 index 00000000000..c88e5c7d674 --- /dev/null +++ b/contrib/production/etcd-druid/certificate-etcd-issuer.yaml @@ -0,0 +1,30 @@ +# CA authority for etcd components. +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned + namespace: cert-manager +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-ca + namespace: cert-manager +spec: + secretName: etcd-ca-tls + isCA: true + commonName: etcd-ca + issuerRef: + name: selfsigned + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: etcd-ca +spec: + ca: + secretName: etcd-ca-tls \ No newline at end of file diff --git a/contrib/production/etcd-druid/etcdcopybackupstasks.druid.gardener.cloud.yaml b/contrib/production/etcd-druid/etcdcopybackupstasks.druid.gardener.cloud.yaml new file mode 100644 index 00000000000..2df2416a6aa --- /dev/null +++ b/contrib/production/etcd-druid/etcdcopybackupstasks.druid.gardener.cloud.yaml @@ -0,0 +1,205 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: etcdcopybackupstasks.druid.gardener.cloud +spec: + group: druid.gardener.cloud + names: + kind: EtcdCopyBackupsTask + listKind: EtcdCopyBackupsTaskList + plural: etcdcopybackupstasks + singular: etcdcopybackupstask + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: EtcdCopyBackupsTask is a task for copying etcd backups from a + source to a target 
store. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EtcdCopyBackupsTaskSpec defines the parameters for the copy + backups task. + properties: + maxBackupAge: + description: |- + MaxBackupAge is the maximum age in days that a backup must have in order to be copied. + By default, all backups will be copied. + format: int32 + minimum: 0 + type: integer + maxBackups: + description: MaxBackups is the maximum number of backups that will + be copied starting with the most recent ones. + format: int32 + minimum: 0 + type: integer + podLabels: + additionalProperties: + type: string + description: PodLabels is a set of labels that will be added to pod(s) + created by the copy backups task. + type: object + sourceStore: + description: SourceStore defines the specification of the source object + store provider for storing backups. + properties: + container: + description: Container is the name of the container the backup + is stored at. + type: string + prefix: + description: Prefix is the prefix used for the store. + type: string + provider: + description: Provider is the name of the backup provider. + type: string + secretRef: + description: SecretRef is the reference to the secret which used + to connect to the backup store. 
+ properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the + secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - prefix + type: object + targetStore: + description: TargetStore defines the specification of the target object + store provider for storing backups. + properties: + container: + description: Container is the name of the container the backup + is stored at. + type: string + prefix: + description: Prefix is the prefix used for the store. + type: string + provider: + description: Provider is the name of the backup provider. + type: string + secretRef: + description: SecretRef is the reference to the secret which used + to connect to the backup store. + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the + secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - prefix + type: object + waitForFinalSnapshot: + description: WaitForFinalSnapshot defines the parameters for waiting + for a final full snapshot before copying backups. + properties: + enabled: + description: Enabled specifies whether to wait for a final full + snapshot before copying backups. + type: boolean + timeout: + description: |- + Timeout is the timeout for waiting for a final full snapshot. When this timeout expires, the copying of backups + will be performed anyway. No timeout or 0 means wait forever. + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+)$ + type: string + required: + - enabled + type: object + required: + - sourceStore + - targetStore + type: object + status: + description: EtcdCopyBackupsTaskStatus defines the observed state of the + copy backups task. 
+ properties: + conditions: + description: Conditions represents the latest available observations + of an object's current state. + items: + description: Condition holds the information about the state of + a resource. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + lastUpdateTime: + description: Last time the condition was updated. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of the Etcd condition. + type: string + required: + - lastTransitionTime + - lastUpdateTime + - message + - reason + - status + - type + type: object + type: array + lastError: + description: LastError represents the last occurred error. + type: string + observedGeneration: + description: ObservedGeneration is the most recent generation observed + for this resource. 
+ format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} \ No newline at end of file diff --git a/contrib/production/etcd-druid/etcds.druid.gardener.cloud.yaml b/contrib/production/etcd-druid/etcds.druid.gardener.cloud.yaml new file mode 100644 index 00000000000..442d9ee579d --- /dev/null +++ b/contrib/production/etcd-druid/etcds.druid.gardener.cloud.yaml @@ -0,0 +1,2123 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: etcds.druid.gardener.cloud +spec: + group: druid.gardener.cloud + names: + kind: Etcd + listKind: EtcdList + plural: etcds + singular: etcd + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ready + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Quorate + type: string + - jsonPath: .status.conditions[?(@.type=="AllMembersReady")].status + name: All Members Ready + type: string + - jsonPath: .status.conditions[?(@.type=="BackupReady")].status + name: Backup Ready + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.replicas + name: Cluster Size + priority: 1 + type: integer + - jsonPath: .status.currentReplicas + name: Current Replicas + priority: 1 + type: integer + - jsonPath: .status.readyReplicas + name: Ready Replicas + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: Etcd is the Schema for the etcds API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EtcdSpec defines the desired state of Etcd + properties: + annotations: + additionalProperties: + type: string + type: object + backup: + description: BackupSpec defines parameters associated with the full + and delta snapshots of etcd. + properties: + compression: + description: SnapshotCompression defines the specification for + compression of Snapshots. + properties: + enabled: + type: boolean + policy: + description: CompressionPolicy defines the type of policy + for compression of snapshots. + enum: + - gzip + - lzw + - zlib + type: string + type: object + deltaSnapshotMemoryLimit: + anyOf: + - type: integer + - type: string + description: DeltaSnapshotMemoryLimit defines the memory limit + after which delta snapshots will be taken + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + deltaSnapshotPeriod: + description: DeltaSnapshotPeriod defines the period after which + delta snapshots will be taken + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + deltaSnapshotRetentionPeriod: + description: |- + DeltaSnapshotRetentionPeriod defines the duration for which delta snapshots will be retained, excluding the latest snapshot set. 
+ The value should be a string formatted as a duration (e.g., '1s', '2m', '3h', '4d') + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + enableProfiling: + description: EnableProfiling defines if profiling should be enabled + for the etcd-backup-restore-sidecar + type: boolean + etcdSnapshotTimeout: + description: EtcdSnapshotTimeout defines the timeout duration + for etcd FullSnapshot operation + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + fullSnapshotSchedule: + description: FullSnapshotSchedule defines the cron standard schedule + for full snapshots. + pattern: ^(\*|[1-5]?[0-9]|[1-5]?[0-9]-[1-5]?[0-9]|(?:[1-9]|[1-4][0-9]|5[0-9])\/(?:[1-9]|[1-4][0-9]|5[0-9]|60)|\*\/(?:[1-9]|[1-4][0-9]|5[0-9]|60))\s+(\*|[0-9]|1[0-9]|2[0-3]|[0-9]-(?:[0-9]|1[0-9]|2[0-3])|1[0-9]-(?:1[0-9]|2[0-3])|2[0-3]-2[0-3]|(?:[1-9]|1[0-9]|2[0-3])\/(?:[1-9]|1[0-9]|2[0-4])|\*\/(?:[1-9]|1[0-9]|2[0-4]))\s+(\*|[1-9]|[12][0-9]|3[01]|[1-9]-(?:[1-9]|[12][0-9]|3[01])|[12][0-9]-(?:[12][0-9]|3[01])|3[01]-3[01]|(?:[1-9]|[12][0-9]|30)\/(?:[1-9]|[12][0-9]|3[01])|\*\/(?:[1-9]|[12][0-9]|3[01]))\s+(\*|[1-9]|1[0-2]|[1-9]-(?:[1-9]|1[0-2])|1[0-2]-1[0-2]|(?:[1-9]|1[0-2])\/(?:[1-9]|1[0-2])|\*\/(?:[1-9]|1[0-2]))\s+(\*|[1-7]|[1-6]-[1-7]|[1-6]\/[1-7]|\*\/[1-7])$ + type: string + garbageCollectionPeriod: + description: GarbageCollectionPeriod defines the period for garbage + collecting old backups + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + garbageCollectionPolicy: + description: GarbageCollectionPolicy defines the policy for garbage + collecting old backups + enum: + - Exponential + - LimitBased + type: string + image: + description: Image defines the etcd container image and tag + type: string + leaderElection: + description: LeaderElection defines parameters related to the + LeaderElection configuration. 
+ properties: + etcdConnectionTimeout: + description: EtcdConnectionTimeout defines the timeout duration + for etcd client connection during leader election. + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + reelectionPeriod: + description: ReelectionPeriod defines the Period after which + leadership status of corresponding etcd is checked. + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + type: object + maxBackupsLimitBasedGC: + description: |- + MaxBackupsLimitBasedGC defines the maximum number of Full snapshots to retain in Limit Based GarbageCollectionPolicy + All full snapshots beyond this limit will be garbage collected. + format: int32 + type: integer + port: + description: Port define the port on which etcd-backup-restore + server will be exposed. + format: int32 + type: integer + resources: + description: |- + Resources defines compute Resources required by backup-restore container. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + snapshotCompaction: + description: SnapshotCompaction defines the specification for + compaction of backups. + properties: + eventsThreshold: + description: EventsThreshold defines the threshold for the + number of etcd events before triggering a compaction job + format: int64 + type: integer + resources: + description: |- + Resources defines compute Resources required by compaction job. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + triggerFullSnapshotThreshold: + description: TriggerFullSnapshotThreshold defines the upper + threshold for the number of etcd events before giving up + on compaction job and triggering a full snapshot. 
+ format: int64 + type: integer + type: object + store: + description: Store defines the specification of object store provider + for storing backups. + properties: + container: + description: Container is the name of the container the backup + is stored at. + type: string + prefix: + description: Prefix is the prefix used for the store. + type: string + provider: + description: Provider is the name of the backup provider. + type: string + secretRef: + description: SecretRef is the reference to the secret which + used to connect to the backup store. + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - prefix + type: object + tls: + description: TLSConfig hold the TLS configuration details. + properties: + clientTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + serverTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + tlsCASecretRef: + description: SecretReference defines a reference to a secret. 
+ properties: + dataKey: + description: DataKey is the name of the key in the data + map containing the credentials. + type: string + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - serverTLSSecretRef + - tlsCASecretRef + type: object + type: object + x-kubernetes-validations: + - message: etcd.spec.backup.garbageCollectionPeriod must be greater + than etcd.spec.backup.deltaSnapshotPeriod + rule: '!(has(self.deltaSnapshotPeriod) && has(self.garbageCollectionPeriod)) + || duration(self.deltaSnapshotPeriod).getSeconds() < duration(self.garbageCollectionPeriod).getSeconds()' + etcd: + description: EtcdConfig defines the configuration for the etcd cluster + to be deployed. + properties: + authSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the + secret name must be unique. 
+ type: string + type: object + x-kubernetes-map-type: atomic + clientPort: + format: int32 + type: integer + clientService: + description: ClientService defines the parameters of the client + service that a user can specify + properties: + annotations: + additionalProperties: + type: string + description: Annotations specify the annotations that should + be added to the client service + type: object + labels: + additionalProperties: + type: string + description: Labels specify the labels that should be added + to the client service + type: object + trafficDistribution: + description: |- + TrafficDistribution defines the traffic distribution preference that should be added to the client service. + More info: https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-distribution + enum: + - PreferClose + type: string + type: object + clientUrlTls: + description: ClientUrlTLS contains the ca, server TLS and client + TLS secrets for client communication to ETCD cluster + properties: + clientTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + serverTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + tlsCASecretRef: + description: SecretReference defines a reference to a secret. 
+ properties: + dataKey: + description: DataKey is the name of the key in the data + map containing the credentials. + type: string + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - serverTLSSecretRef + - tlsCASecretRef + type: object + defragmentationSchedule: + description: DefragmentationSchedule defines the cron standard + schedule for defragmentation of etcd. + pattern: ^(\*|[1-5]?[0-9]|[1-5]?[0-9]-[1-5]?[0-9]|(?:[1-9]|[1-4][0-9]|5[0-9])\/(?:[1-9]|[1-4][0-9]|5[0-9]|60)|\*\/(?:[1-9]|[1-4][0-9]|5[0-9]|60))\s+(\*|[0-9]|1[0-9]|2[0-3]|[0-9]-(?:[0-9]|1[0-9]|2[0-3])|1[0-9]-(?:1[0-9]|2[0-3])|2[0-3]-2[0-3]|(?:[1-9]|1[0-9]|2[0-3])\/(?:[1-9]|1[0-9]|2[0-4])|\*\/(?:[1-9]|1[0-9]|2[0-4]))\s+(\*|[1-9]|[12][0-9]|3[01]|[1-9]-(?:[1-9]|[12][0-9]|3[01])|[12][0-9]-(?:[12][0-9]|3[01])|3[01]-3[01]|(?:[1-9]|[12][0-9]|30)\/(?:[1-9]|[12][0-9]|3[01])|\*\/(?:[1-9]|[12][0-9]|3[01]))\s+(\*|[1-9]|1[0-2]|[1-9]-(?:[1-9]|1[0-2])|1[0-2]-1[0-2]|(?:[1-9]|1[0-2])\/(?:[1-9]|1[0-2])|\*\/(?:[1-9]|1[0-2]))\s+(\*|[1-7]|[1-6]-[1-7]|[1-6]\/[1-7]|\*\/[1-7])$ + type: string + etcdDefragTimeout: + description: EtcdDefragTimeout defines the timeout duration for + etcd defrag call + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + heartbeatDuration: + description: HeartbeatDuration defines the duration for members + to send heartbeats. The default value is 10s. + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + image: + description: Image defines the etcd container image and tag + type: string + metrics: + description: Metrics defines the level of detail for exported + metrics of etcd, specify 'extensive' to include histogram metrics. 
+ enum: + - basic + - extensive + type: string + peerUrlTls: + description: |- + PeerUrlTLS contains the ca and server TLS secrets for peer communication within ETCD cluster + Currently, PeerUrlTLS does not require client TLS secrets for gardener implementation of ETCD cluster. + properties: + clientTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + serverTLSSecretRef: + description: |- + SecretReference represents a Secret Reference. It has enough information to retrieve secret + in any namespace + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + tlsCASecretRef: + description: SecretReference defines a reference to a secret. + properties: + dataKey: + description: DataKey is the name of the key in the data + map containing the credentials. + type: string + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - serverTLSSecretRef + - tlsCASecretRef + type: object + quota: + anyOf: + - type: integer + - type: string + description: Quota defines the etcd DB quota. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resources: + description: |- + Resources defines the compute Resources required by etcd container. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serverPort: + format: int32 + type: integer + snapshotCount: + description: |- + SnapshotCount defines the number of applied Raft entries to hold in-memory before compaction. + More info: https://etcd.io/docs/v3.4/op-guide/maintenance/#raft-log-retention + format: int64 + type: integer + wrapperPort: + format: int32 + type: integer + type: object + labels: + additionalProperties: + type: string + type: object + priorityClassName: + description: PriorityClassName is the name of a priority class that + shall be used for the etcd pods. + type: string + replicas: + format: int32 + type: integer + x-kubernetes-validations: + - message: Replicas can either be increased or be downscaled to 0. + rule: 'self==0 ? true : self < oldSelf ? false : true' + runAsRoot: + description: |- + RunAsRoot defines whether the securityContext of the pod specification should indicate that the containers shall + run as root. By default, they run as non-root with user 'nobody'. + type: boolean + schedulingConstraints: + description: |- + SchedulingConstraints defines the different scheduling constraints that must be applied to the + pod spec in the etcd statefulset. + Currently supported constraints are Affinity and TopologySpreadConstraints. 
+ properties: + affinity: + description: |- + Affinity defines the various affinity and anti-affinity rules for a pod + that are honoured by the kube-scheduler. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology domains, + that are honoured by the kube-scheduler. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. 
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
+ + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. 
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + selector: + description: |- + selector is a label query over pods that should match the replica count. + It must match the pod template's labels. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Deprecated: this field will be removed in the future. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + sharedConfig: + description: SharedConfig defines parameters shared and used by Etcd + as well as backup-restore sidecar. + properties: + autoCompactionMode: + description: AutoCompactionMode defines the auto-compaction-mode:'periodic' + mode or 'revision' mode for etcd and embedded-etcd of backup-restore + sidecar. + enum: + - periodic + - revision + type: string + autoCompactionRetention: + description: AutoCompactionRetention defines the auto-compaction-retention + length for etcd as well as for embedded-etcd of backup-restore + sidecar. + type: string + type: object + storageCapacity: + anyOf: + - type: integer + - type: string + description: StorageCapacity defines the size of persistent volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + description: |- + StorageClass defines the name of the StorageClass required by the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + x-kubernetes-validations: + - message: etcd.spec.storageClass is an immutable field + rule: self == oldSelf + volumeClaimTemplate: + description: VolumeClaimTemplate defines the volume claim template + to be created + type: string + x-kubernetes-validations: + - message: etcd.spec.VolumeClaimTemplate is an immutable field + rule: self == oldSelf + required: + - backup + - etcd + - labels + - replicas + type: object + x-kubernetes-validations: + - message: etcd.spec.storageClass is an immutable field. + rule: has(oldSelf.storageClass) == has(self.storageClass) + - message: etcd.spec.volumeClaimTemplate is an immutable field. + rule: has(oldSelf.volumeClaimTemplate) == has(self.volumeClaimTemplate) + status: + description: EtcdStatus defines the observed state of Etcd. + properties: + conditions: + description: Conditions represents the latest available observations + of an etcd's current state. + items: + description: Condition holds the information about the state of + a resource. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + lastUpdateTime: + description: Last time the condition was updated. + format: date-time + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of the Etcd condition. + type: string + required: + - lastTransitionTime + - lastUpdateTime + - message + - reason + - status + - type + type: object + type: array + currentReplicas: + description: CurrentReplicas is the current replica count for the + etcd cluster. 
+ format: int32 + type: integer + etcd: + description: CrossVersionObjectReference contains enough information + to let you identify the referred resource. + properties: + apiVersion: + description: API version of the referent + type: string + kind: + description: Kind of the referent + type: string + name: + description: Name of the referent + type: string + required: + - kind + - name + type: object + labelSelector: + description: |- + LabelSelector is a label query over pods that should match the replica count. + It must match the pod template's labels. + Deprecated: this field will be removed in the future. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + lastErrors: + description: LastErrors captures errors that occurred during the last + operation. + items: + description: LastError stores details of the most recent error encountered + for a resource. + properties: + code: + description: Code is an error code that uniquely identifies + an error. + type: string + description: + description: Description is a human-readable message indicating + details of the error. + type: string + observedAt: + description: ObservedAt is the time the error was observed. + format: date-time + type: string + required: + - code + - description + - observedAt + type: object + type: array + lastOperation: + description: LastOperation indicates the last operation performed + on this resource. + properties: + description: + description: Description describes the last operation. + type: string + lastUpdateTime: + description: LastUpdateTime is the time at which the operation + was last updated. + format: date-time + type: string + runID: + description: |- + RunID correlates an operation with a reconciliation run. + Every time an Etcd resource is reconciled (barring status reconciliation which is periodic), a unique ID is + generated which can be used to correlate all actions done as part of a single reconcile run. Capturing this + as part of LastOperation aids in establishing this correlation. This further helps in also easily filtering + reconcile logs as all structured logs in a reconciliation run should have the `runID` referenced. + type: string + state: + description: State is the state of the last operation. + type: string + type: + description: Type is the type of last operation. + type: string + required: + - description + - lastUpdateTime + - runID + - state + - type + type: object + members: + description: Members represents the members of the etcd cluster + items: + description: EtcdMemberStatus holds information about etcd cluster + membership. 
+ properties: + id: + description: ID is the ID of the etcd member. + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition's + status changed. + format: date-time + type: string + name: + description: Name is the name of the etcd member. It is the + name of the backing `Pod`. + type: string + reason: + description: The reason for the condition's last transition. + type: string + role: + description: Role is the role in the etcd cluster, either `Leader` + or `Member`. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + required: + - lastTransitionTime + - name + - reason + - status + type: object + type: array + observedGeneration: + description: ObservedGeneration is the most recent generation observed + for this resource. + format: int64 + type: integer + peerUrlTLSEnabled: + description: PeerUrlTLSEnabled captures the state of peer url TLS + being enabled for the etcd member(s) + type: boolean + ready: + description: Ready is `true` if all etcd replicas are ready. + type: boolean + readyReplicas: + description: ReadyReplicas is the count of replicas being ready in + the etcd cluster. + format: int32 + type: integer + replicas: + description: Replicas is the replica count of the etcd cluster. + format: int32 + type: integer + selector: + description: |- + Selector is a label query over pods that should match the replica count. + It must match the pod template's labels. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} \ No newline at end of file diff --git a/contrib/production/kcp-comer/certificate-etcd.yaml b/contrib/production/kcp-comer/certificate-etcd.yaml new file mode 100644 index 00000000000..66d4d55fda0 --- /dev/null +++ b/contrib/production/kcp-comer/certificate-etcd.yaml @@ -0,0 +1,144 @@ +--- +# Root etcd CA certificate for all etcd components across all kcp deployment. +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-ca + namespace: kcp-comer +spec: + secretName: etcd-ca-tls + isCA: true + commonName: etcd-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA certificate (for peer communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-ca + namespace: kcp-comer +spec: + secretName: etcd-peer-ca-tls + isCA: true + commonName: etcd-peer-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA issuer +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: etcd-peer-ca + namespace: kcp-comer +spec: + ca: + secretName: etcd-peer-ca-tls +--- +# etcd server certificate (for client connections) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-server-tls + namespace: kcp-comer +spec: + secretName: etcd-server-tls + commonName: etcd-server + dnsNames: + - root-local + - root-client + - root-client.kcp-comer + - root-client.kcp-comer.svc + - root-client.kcp-comer.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-comer" + - "*.root-peer.kcp-comer.svc" + - "*.root-peer.kcp-comer.svc.cluster.local" + - alpha-local + - alpha-client + - alpha-client.kcp-comer + - alpha-client.kcp-comer.svc + - alpha-client.kcp-comer.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-comer" + - "*.alpha-peer.kcp-comer.svc" + - 
"*.alpha-peer.kcp-comer.svc.cluster.local" + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-client-tls + namespace: kcp-comer +spec: + secretName: etcd-client-tls + commonName: etcd-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd peer certificate (for etcd member communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-tls + namespace: kcp-comer +spec: + secretName: etcd-peer-tls + commonName: etcd-peer + dnsNames: + - root-peer + - root-peer.kcp-comer + - root-peer.kcp-comer.svc + - root-peer.kcp-comer.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-comer" + - "*.root-peer.kcp-comer.svc" + - "*.root-peer.kcp-comer.svc.cluster.local" + - alpha-peer + - alpha-peer.kcp-comer + - alpha-peer.kcp-comer.svc + - alpha-peer.kcp-comer.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-comer" + - "*.alpha-peer.kcp-comer.svc" + - "*.alpha-peer.kcp-comer.svc.cluster.local" + issuerRef: + name: etcd-peer-ca + kind: Issuer +--- +# etcd backup-restore server certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-server-tls + namespace: kcp-comer +spec: + secretName: etcd-backup-restore-server-tls + commonName: etcd-backup-restore-server + dnsNames: + - root-local + - alpha-local + - localhost + ipAddresses: ["127.0.0.1"] + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd backup-restore client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-client-tls + namespace: kcp-comer +spec: + secretName: etcd-backup-restore-client-tls + commonName: etcd-backup-restore-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer \ No newline at end of file diff --git a/contrib/production/kcp-comer/certificate-kcp.yaml b/contrib/production/kcp-comer/certificate-kcp.yaml new file mode 100644 index 
00000000000..d2a84011384 --- /dev/null +++ b/contrib/production/kcp-comer/certificate-kcp.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned + namespace: kcp-comer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kcp-ca + namespace: kcp-comer +spec: + secretName: kcp-ca-tls + isCA: true + commonName: kcp-ca + issuerRef: + name: selfsigned + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: kcp-ca + namespace: kcp-comer +spec: + ca: + secretName: kcp-ca-tls \ No newline at end of file diff --git a/contrib/production/kcp-comer/etcd-druid-alpha.yaml b/contrib/production/kcp-comer/etcd-druid-alpha.yaml new file mode 100644 index 00000000000..83107034f38 --- /dev/null +++ b/contrib/production/kcp-comer/etcd-druid-alpha.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: alpha + namespace: kcp-comer + labels: + app: etcd-statefulset + role: alpha +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + 
reelectionPeriod: 5s + etcdConnectionTimeout: 5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: alpha + labels: + app: etcd-statefulset + role: alpha \ No newline at end of file diff --git a/contrib/production/kcp-comer/etcd-druid-root.yaml b/contrib/production/kcp-comer/etcd-druid-root.yaml new file mode 100644 index 00000000000..28d16605718 --- /dev/null +++ b/contrib/production/kcp-comer/etcd-druid-root.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: root + namespace: kcp-comer + labels: + app: etcd-statefulset + role: root +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + reelectionPeriod: 5s + etcdConnectionTimeout: 5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" } 
+ serverTLSSecretRef: { name: "etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: root + labels: + app: etcd-statefulset + role: root \ No newline at end of file diff --git a/contrib/production/kcp-comer/kcp-alpha-shard.yaml b/contrib/production/kcp-comer/kcp-alpha-shard.yaml new file mode 100644 index 00000000000..b2ba9699016 --- /dev/null +++ b/contrib/production/kcp-comer/kcp-alpha-shard.yaml @@ -0,0 +1,35 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Shard +metadata: + name: alpha + namespace: kcp-comer +spec: + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + etcd: + endpoints: + - https://alpha-client.kcp-comer.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls + shardBaseURL: https://alpha.comer.example.com:6443 + certificateTemplates: + server: + spec: + dnsNames: + - alpha.comer.example.com + rootShard: + ref: + name: root + serviceTemplate: + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-comer/kcp-front-proxy-internal.yaml b/contrib/production/kcp-comer/kcp-front-proxy-internal.yaml new file mode 100644 index 00000000000..0dafca69687 --- /dev/null +++ b/contrib/production/kcp-comer/kcp-front-proxy-internal.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: FrontProxy +metadata: + name: frontproxy-internal + namespace: kcp-comer +spec: + external: + hostname: api-internal.comer.example.com + port: 6443 + rootShard: + ref: + name: root + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + 
usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + serviceTemplate: + metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-comer/kcp-front-proxy.yaml b/contrib/production/kcp-comer/kcp-front-proxy.yaml new file mode 100644 index 00000000000..d53b28fef1a --- /dev/null +++ b/contrib/production/kcp-comer/kcp-front-proxy.yaml @@ -0,0 +1,34 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: FrontProxy +metadata: + name: frontproxy + namespace: kcp-comer +spec: + external: + hostname: api.comer.example.com + port: 443 + caBundleSecretRef: + name: google-we1-ca + rootShard: + ref: + name: root + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + serviceTemplate: + metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-comer/kcp-root-shard.yaml b/contrib/production/kcp-comer/kcp-root-shard.yaml new file mode 100644 index 00000000000..071acfc73e0 --- /dev/null +++ b/contrib/production/kcp-comer/kcp-root-shard.yaml @@ -0,0 +1,48 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: RootShard +metadata: + name: root + namespace: kcp-comer +spec: + 
auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + external: + hostname: api.comer.example.com + port: 443 + privateHostname: api-internal.comer.example.com + privatePort: 6443 + certificates: + # this references the issuer created above + issuerRef: + group: cert-manager.io + kind: Issuer + name: kcp-ca + cache: + embedded: + # kcp comes with a cache server accessible to all shards, + # in this case it is fine to enable the embedded instance + enabled: true + etcd: + endpoints: + - https://root-client.kcp-comer.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls + shardBaseURL: https://root.comer.example.com:6443 + certificateTemplates: + server: + spec: + dnsNames: + - root.comer.example.com + serviceTemplate: + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-comer/kubeconfig-kcp-admin-internal.yaml b/contrib/production/kcp-comer/kubeconfig-kcp-admin-internal.yaml new file mode 100644 index 00000000000..aa233d30c6d --- /dev/null +++ b/contrib/production/kcp-comer/kubeconfig-kcp-admin-internal.yaml @@ -0,0 +1,15 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: kcp-admin-frontproxy-internal + namespace: kcp-comer +spec: + username: kcp-admin + groups: + - system:kcp:admin + validity: 8766h + secretRef: + name: kcp-admin-frontproxy-internal + target: + frontProxyRef: + name: frontproxy-internal diff --git a/contrib/production/kcp-comer/kubeconfig-kcp-admin.yaml b/contrib/production/kcp-comer/kubeconfig-kcp-admin.yaml new file mode 100644 index 00000000000..e34984ac330 --- /dev/null +++ b/contrib/production/kcp-comer/kubeconfig-kcp-admin.yaml @@ -0,0 +1,15 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: kcp-admin-frontproxy + namespace: kcp-comer 
+spec: + username: kcp-admin + groups: + - system:kcp:admin + validity: 8766h + secretRef: + name: kcp-admin-frontproxy + target: + frontProxyRef: + name: frontproxy diff --git a/contrib/production/kcp-dekker/certificate-etcd.yaml b/contrib/production/kcp-dekker/certificate-etcd.yaml new file mode 100644 index 00000000000..7625eca369b --- /dev/null +++ b/contrib/production/kcp-dekker/certificate-etcd.yaml @@ -0,0 +1,144 @@ +--- +# Root etcd CA certificate for all etcd components across all kcp deployment. +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-ca + namespace: kcp-dekker +spec: + secretName: etcd-ca-tls + isCA: true + commonName: etcd-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA certificate (for peer communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-ca + namespace: kcp-dekker +spec: + secretName: etcd-peer-ca-tls + isCA: true + commonName: etcd-peer-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA issuer +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: etcd-peer-ca + namespace: kcp-dekker +spec: + ca: + secretName: etcd-peer-ca-tls +--- +# etcd server certificate (for client connections) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-server-tls + namespace: kcp-dekker +spec: + secretName: etcd-server-tls + commonName: etcd-server + dnsNames: + - root-local + - root-client + - root-client.kcp-dekker + - root-client.kcp-dekker.svc + - root-client.kcp-dekker.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-dekker" + - "*.root-peer.kcp-dekker.svc" + - "*.root-peer.kcp-dekker.svc.cluster.local" + - alpha-local + - alpha-client + - alpha-client.kcp-dekker + - alpha-client.kcp-dekker.svc + - alpha-client.kcp-dekker.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-dekker" + - "*.alpha-peer.kcp-dekker.svc" + - "*.alpha-peer.kcp-dekker.svc.cluster.local" + issuerRef: + name: 
etcd-ca + kind: ClusterIssuer +--- +# etcd client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-client-tls + namespace: kcp-dekker +spec: + secretName: etcd-client-tls + commonName: etcd-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd peer certificate (for etcd member communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-tls + namespace: kcp-dekker +spec: + secretName: etcd-peer-tls + commonName: etcd-peer + dnsNames: + - root-peer + - root-peer.kcp-dekker + - root-peer.kcp-dekker.svc + - root-peer.kcp-dekker.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-dekker" + - "*.root-peer.kcp-dekker.svc" + - "*.root-peer.kcp-dekker.svc.cluster.local" + - alpha-peer + - alpha-peer.kcp-dekker + - alpha-peer.kcp-dekker.svc + - alpha-peer.kcp-dekker.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-dekker" + - "*.alpha-peer.kcp-dekker.svc" + - "*.alpha-peer.kcp-dekker.svc.cluster.local" + issuerRef: + name: etcd-peer-ca + kind: Issuer +--- +# etcd backup-restore server certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-server-tls + namespace: kcp-dekker +spec: + secretName: etcd-backup-restore-server-tls + commonName: etcd-backup-restore-server + dnsNames: + - root-local + - alpha-local + - localhost + - 127.0.0.1 + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd backup-restore client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-client-tls + namespace: kcp-dekker +spec: + secretName: etcd-backup-restore-client-tls + commonName: etcd-backup-restore-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer \ No newline at end of file diff --git a/contrib/production/kcp-dekker/certificate-kcp.yaml b/contrib/production/kcp-dekker/certificate-kcp.yaml new file mode 100644 index 00000000000..74e6a6b4697 --- /dev/null +++ 
b/contrib/production/kcp-dekker/certificate-kcp.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned + namespace: kcp-dekker +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kcp-ca + namespace: kcp-dekker +spec: + secretName: kcp-ca-tls + isCA: true + commonName: kcp-ca + issuerRef: + name: selfsigned + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: kcp-ca + namespace: kcp-dekker +spec: + ca: + secretName: kcp-ca-tls \ No newline at end of file diff --git a/contrib/production/kcp-dekker/etcd-druid-alpha.yaml b/contrib/production/kcp-dekker/etcd-druid-alpha.yaml new file mode 100644 index 00000000000..775ef9ef825 --- /dev/null +++ b/contrib/production/kcp-dekker/etcd-druid-alpha.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: alpha + namespace: kcp-dekker + labels: + app: etcd-statefulset + role: alpha +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + reelectionPeriod: 5s + etcdConnectionTimeout: 
5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: alpha + labels: + app: etcd-statefulset + role: alpha \ No newline at end of file diff --git a/contrib/production/kcp-dekker/etcd-druid-root.yaml b/contrib/production/kcp-dekker/etcd-druid-root.yaml new file mode 100644 index 00000000000..2464b4ba2b2 --- /dev/null +++ b/contrib/production/kcp-dekker/etcd-druid-root.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: root + namespace: kcp-dekker + labels: + app: etcd-statefulset + role: root +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + reelectionPeriod: 5s + etcdConnectionTimeout: 5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: 
"etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: root + labels: + app: etcd-statefulset + role: root \ No newline at end of file diff --git a/contrib/production/kcp-dekker/kcp-alpha-shard.yaml b/contrib/production/kcp-dekker/kcp-alpha-shard.yaml new file mode 100644 index 00000000000..264ff22f36f --- /dev/null +++ b/contrib/production/kcp-dekker/kcp-alpha-shard.yaml @@ -0,0 +1,26 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Shard +metadata: + name: alpha + namespace: kcp-dekker +spec: + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + etcd: + endpoints: + - https://alpha-client.kcp-dekker.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls + rootShard: + ref: + name: root \ No newline at end of file diff --git a/contrib/production/kcp-dekker/kcp-front-proxy.yaml b/contrib/production/kcp-dekker/kcp-front-proxy.yaml new file mode 100644 index 00000000000..28d77075116 --- /dev/null +++ b/contrib/production/kcp-dekker/kcp-front-proxy.yaml @@ -0,0 +1,32 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: FrontProxy +metadata: + name: frontproxy + namespace: kcp-dekker +spec: + external: + hostname: api.dekker.example.com + port: 6443 + rootShard: + ref: + name: root + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + serviceTemplate: + metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + 
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-dekker/kcp-root-shard.yaml b/contrib/production/kcp-dekker/kcp-root-shard.yaml new file mode 100644 index 00000000000..ea1012fe5f1 --- /dev/null +++ b/contrib/production/kcp-dekker/kcp-root-shard.yaml @@ -0,0 +1,37 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: RootShard +metadata: + name: root + namespace: kcp-dekker +spec: + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + external: + hostname: api.dekker.example.com + port: 6443 + certificates: + # this references the issuer created above + issuerRef: + group: cert-manager.io + kind: Issuer + name: kcp-ca + cache: + embedded: + # kcp comes with a cache server accessible to all shards, + # in this case it is fine to enable the embedded instance + enabled: true + etcd: + endpoints: + - https://root-client.kcp-dekker.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls \ No newline at end of file diff --git a/contrib/production/kcp-dekker/kubeconfig-kcp-admin.yaml b/contrib/production/kcp-dekker/kubeconfig-kcp-admin.yaml new file mode 100644 index 00000000000..e26bc7b8661 --- /dev/null +++ b/contrib/production/kcp-dekker/kubeconfig-kcp-admin.yaml @@ -0,0 +1,15 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: kcp-admin-frontproxy + namespace: kcp-dekker +spec: + username: kcp-admin + groups: + - system:kcp:admin + validity: 8766h # 1 year + secretRef: + name: kcp-admin-frontproxy + target: + frontProxyRef: + name: frontproxy diff --git 
a/contrib/production/kcp-vespucci/certificate-etcd.yaml b/contrib/production/kcp-vespucci/certificate-etcd.yaml new file mode 100644 index 00000000000..b4de2ed9079 --- /dev/null +++ b/contrib/production/kcp-vespucci/certificate-etcd.yaml @@ -0,0 +1,144 @@ +--- +# Root etcd CA certificate for all etcd components across all kcp deployment. +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-ca + namespace: kcp-vespucci +spec: + secretName: etcd-ca-tls + isCA: true + commonName: etcd-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA certificate (for peer communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-ca + namespace: kcp-vespucci +spec: + secretName: etcd-peer-ca-tls + isCA: true + commonName: etcd-peer-ca + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd Peer CA issuer +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: etcd-peer-ca + namespace: kcp-vespucci +spec: + ca: + secretName: etcd-peer-ca-tls +--- +# etcd server certificate (for client connections) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-server-tls + namespace: kcp-vespucci +spec: + secretName: etcd-server-tls + commonName: etcd-server + dnsNames: + - root-local + - root-client + - root-client.kcp-vespucci + - root-client.kcp-vespucci.svc + - root-client.kcp-vespucci.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-vespucci" + - "*.root-peer.kcp-vespucci.svc" + - "*.root-peer.kcp-vespucci.svc.cluster.local" + - alpha-local + - alpha-client + - alpha-client.kcp-vespucci + - alpha-client.kcp-vespucci.svc + - alpha-client.kcp-vespucci.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-vespucci" + - "*.alpha-peer.kcp-vespucci.svc" + - "*.alpha-peer.kcp-vespucci.svc.cluster.local" + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: 
etcd-client-tls + namespace: kcp-vespucci +spec: + secretName: etcd-client-tls + commonName: etcd-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd peer certificate (for etcd member communication) +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-peer-tls + namespace: kcp-vespucci +spec: + secretName: etcd-peer-tls + commonName: etcd-peer + dnsNames: + - root-peer + - root-peer.kcp-vespucci + - root-peer.kcp-vespucci.svc + - root-peer.kcp-vespucci.svc.cluster.local + - "*.root-peer" + - "*.root-peer.kcp-vespucci" + - "*.root-peer.kcp-vespucci.svc" + - "*.root-peer.kcp-vespucci.svc.cluster.local" + - alpha-peer + - alpha-peer.kcp-vespucci + - alpha-peer.kcp-vespucci.svc + - alpha-peer.kcp-vespucci.svc.cluster.local + - "*.alpha-peer" + - "*.alpha-peer.kcp-vespucci" + - "*.alpha-peer.kcp-vespucci.svc" + - "*.alpha-peer.kcp-vespucci.svc.cluster.local" + issuerRef: + name: etcd-peer-ca + kind: Issuer +--- +# etcd backup-restore server certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-server-tls + namespace: kcp-vespucci +spec: + secretName: etcd-backup-restore-server-tls + commonName: etcd-backup-restore-server + dnsNames: + - root-local + - alpha-local + - localhost + - 127.0.0.1 + issuerRef: + name: etcd-ca + kind: ClusterIssuer +--- +# etcd backup-restore client certificate +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: etcd-backup-restore-client-tls + namespace: kcp-vespucci +spec: + secretName: etcd-backup-restore-client-tls + commonName: etcd-backup-restore-client + issuerRef: + name: etcd-ca + kind: ClusterIssuer \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/certificate-kcp.yaml b/contrib/production/kcp-vespucci/certificate-kcp.yaml new file mode 100644 index 00000000000..db12eb2a2de --- /dev/null +++ b/contrib/production/kcp-vespucci/certificate-kcp.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: cert-manager.io/v1 +kind: 
Issuer +metadata: + name: selfsigned + namespace: kcp-vespucci +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kcp-ca + namespace: kcp-vespucci +spec: + secretName: kcp-ca-tls + isCA: true + commonName: kcp-ca + issuerRef: + name: selfsigned + kind: Issuer +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: kcp-ca + namespace: kcp-vespucci +spec: + ca: + secretName: kcp-ca-tls \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/etcd-druid-alpha.yaml b/contrib/production/kcp-vespucci/etcd-druid-alpha.yaml new file mode 100644 index 00000000000..2cc06d695a8 --- /dev/null +++ b/contrib/production/kcp-vespucci/etcd-druid-alpha.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: alpha + namespace: kcp-vespucci + labels: + app: etcd-statefulset + role: alpha +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + reelectionPeriod: 5s + etcdConnectionTimeout: 5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" 
} + serverTLSSecretRef: { name: "etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: alpha + labels: + app: etcd-statefulset + role: alpha \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/etcd-druid-root.yaml b/contrib/production/kcp-vespucci/etcd-druid-root.yaml new file mode 100644 index 00000000000..3846a136e1e --- /dev/null +++ b/contrib/production/kcp-vespucci/etcd-druid-root.yaml @@ -0,0 +1,67 @@ +apiVersion: druid.gardener.cloud/v1alpha1 +kind: Etcd +metadata: + name: root + namespace: kcp-vespucci + labels: + app: etcd-statefulset + role: root +spec: + replicas: 3 + + etcd: + metrics: basic + defragmentationSchedule: "0 */24 * * *" + resources: + limits: { cpu: 500m, memory: 1Gi } + requests: { cpu: 100m, memory: 200Mi } + clientPort: 2379 + serverPort: 2380 + quota: 8Gi + + # configure the certificates we just created + + clientUrlTls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-server-tls" } + clientTLSSecretRef: { name: "etcd-client-tls" } + + peerUrlTls: + tlsCASecretRef: { name: "etcd-peer-ca-tls" } + serverTLSSecretRef: { name: "etcd-peer-tls" } + clientTLSSecretRef: { name: "etcd-peer-tls" } + + backup: + port: 8080 + fullSnapshotSchedule: "0 */24 * * *" + resources: + limits: { cpu: 200m, memory: 1Gi } + requests: { cpu: 23m, memory: 128Mi } + garbageCollectionPolicy: Exponential + garbageCollectionPeriod: 43200s + deltaSnapshotPeriod: 300s + deltaSnapshotMemoryLimit: 1Gi + compression: + enabled: false + policy: "gzip" + leaderElection: + reelectionPeriod: 5s + etcdConnectionTimeout: 5s + + # configure the certificates we just created + + tls: + tlsCASecretRef: { name: "etcd-ca-tls" } + serverTLSSecretRef: { name: "etcd-backup-restore-server-tls" } + clientTLSSecretRef: { name: "etcd-backup-restore-client-tls" } + + 
sharedConfig: + autoCompactionMode: periodic + autoCompactionRetention: "30m" + + annotations: + app: etcd-statefulset + role: root + labels: + app: etcd-statefulset + role: root \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/kcp-alpha-shard.yaml b/contrib/production/kcp-vespucci/kcp-alpha-shard.yaml new file mode 100644 index 00000000000..f848432f84f --- /dev/null +++ b/contrib/production/kcp-vespucci/kcp-alpha-shard.yaml @@ -0,0 +1,37 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Shard +metadata: + name: alpha + namespace: kcp-vespucci +spec: + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + caBundleSecretRef: + name: letsencrypt-ca + etcd: + endpoints: + - https://alpha-client.kcp-vespucci.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls + shardBaseURL: https://alpha.vespucci.example.com:6443 + certificateTemplates: + server: + spec: + dnsNames: + - alpha.vespucci.example.com + rootShard: + ref: + name: root + serviceTemplate: + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/kcp-front-proxy.yaml b/contrib/production/kcp-vespucci/kcp-front-proxy.yaml new file mode 100644 index 00000000000..74b88209db1 --- /dev/null +++ b/contrib/production/kcp-vespucci/kcp-front-proxy.yaml @@ -0,0 +1,46 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: FrontProxy +metadata: + name: frontproxy + namespace: kcp-vespucci +spec: + external: + hostname: api.vespucci.example.com + port: 6443 + rootShard: + ref: + name: root + caBundleSecretRef: + name: letsencrypt-ca + certificateTemplates: + server: + metadata: + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + spec: + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + dnsNames: 
+ - api.vespucci.example.com + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + serviceTemplate: + metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + spec: + type: LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/kcp-root-shard.yaml b/contrib/production/kcp-vespucci/kcp-root-shard.yaml new file mode 100644 index 00000000000..4dc8e65a002 --- /dev/null +++ b/contrib/production/kcp-vespucci/kcp-root-shard.yaml @@ -0,0 +1,48 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: RootShard +metadata: + name: root + namespace: kcp-vespucci +spec: + auth: + serviceAccount: + enabled: true + oidc: + issuerURL: https://auth.example.com + clientID: platform-mesh + groupsClaim: groups + usernameClaim: email + usernamePrefix: "oidc:" + groupsPrefix: "oidc:" + clientSecret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + caBundleSecretRef: + name: letsencrypt-ca + external: + hostname: api.vespucci.example.com + port: 6443 + certificates: + # this references the issuer created above + issuerRef: + group: cert-manager.io + kind: Issuer + name: kcp-ca + cache: + embedded: + # kcp comes with a cache server accessible to all shards, + # in this case it is fine to enable the embedded instance + enabled: true + shardBaseURL: https://root.vespucci.example.com:6443 + certificateTemplates: + server: + spec: + dnsNames: + - root.vespucci.example.com + etcd: + endpoints: + - https://root-client.kcp-vespucci.svc.cluster.local:2379 + tlsConfig: + secretRef: + name: etcd-ca-tls + serviceTemplate: + spec: + type: 
LoadBalancer \ No newline at end of file diff --git a/contrib/production/kcp-vespucci/kubeconfig-kcp-admin.yaml b/contrib/production/kcp-vespucci/kubeconfig-kcp-admin.yaml new file mode 100644 index 00000000000..6f04d52262b --- /dev/null +++ b/contrib/production/kcp-vespucci/kubeconfig-kcp-admin.yaml @@ -0,0 +1,15 @@ +apiVersion: operator.kcp.io/v1alpha1 +kind: Kubeconfig +metadata: + name: kcp-admin-frontproxy + namespace: kcp-vespucci +spec: + username: kcp-admin + groups: + - system:kcp:admin + validity: 8766h # 1 year + secretRef: + name: kcp-admin-frontproxy + target: + frontProxyRef: + name: frontproxy \ No newline at end of file diff --git a/contrib/production/oidc-dex/certificate-dns.yaml b/contrib/production/oidc-dex/certificate-dns.yaml new file mode 100644 index 00000000000..4b11ed13dc9 --- /dev/null +++ b/contrib/production/oidc-dex/certificate-dns.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: dex-tls-cert + namespace: oidc +spec: + secretName: dex-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + dnsNames: + - auth.example.com + usages: + - digital signature + - key encipherment \ No newline at end of file diff --git a/contrib/production/oidc-dex/postgres-cluster.yaml b/contrib/production/oidc-dex/postgres-cluster.yaml new file mode 100644 index 00000000000..8536c5b9da0 --- /dev/null +++ b/contrib/production/oidc-dex/postgres-cluster.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-auth + namespace: oidc +spec: + instances: 1 + bootstrap: + initdb: + database: dex + owner: dex + secret: + name: dex-postgres + enableSuperuserAccess: true + superuserSecret: + name: dex-superuser + storage: + size: 10Gi +--- +apiVersion: v1 +data: + username: ZGV4 + password: cGFzc3dvcmQ= +kind: Secret +metadata: + namespace: oidc + name: dex-postgres +type: kubernetes.io/basic-auth +--- +apiVersion: v1 +data: + 
username: cG9zdGdyZXM= + password: cGFzc3dvcmQ= +kind: Secret +metadata: + namespace: oidc + name: dex-superuser +type: kubernetes.io/basic-auth \ No newline at end of file diff --git a/contrib/production/oidc-dex/postgres-database.yaml b/contrib/production/oidc-dex/postgres-database.yaml new file mode 100644 index 00000000000..784661eed50 --- /dev/null +++ b/contrib/production/oidc-dex/postgres-database.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + namespace: oidc + name: db-dex +spec: + name: dex + owner: dex + cluster: + name: pg-auth \ No newline at end of file diff --git a/contrib/production/oidc-dex/values.yaml.template b/contrib/production/oidc-dex/values.yaml.template new file mode 100644 index 00000000000..d370ee54a8b --- /dev/null +++ b/contrib/production/oidc-dex/values.yaml.template @@ -0,0 +1,68 @@ +config: + # The issuer URL is still the public HTTPS URL. + issuer: https://auth.example.com + + logger: + level: "debug" + + storage: + type: postgres + config: + host: pg-auth-rw.oidc.svc.cluster.local + port: 5432 + database: dex + user: postgres + password: password + ssl: + mode: disable + + # Dex will handle TLS termination using the cert-manager certificate + web: + https: 0.0.0.0:5557 + tlsCert: /etc/dex/tls/tls.crt + tlsKey: /etc/dex/tls/tls.key + http: "0.0.0.0:5556" + + # Your connectors and staticClients remain the same... + connectors: + - type: github + id: github + name: GitHub + config: + clientID: xxxxxxx + clientSecret: xxxxxxxxxx + redirectURI: https://auth.example.com/callback + org: platform-mesh + staticClients: + - id: platform-mesh + redirectURIs: + - https://auth.example.com/callback + - http://localhost:8000 + - http://127.0.0.1:8000/ + name: 'PlatformMeshApp' + secret: Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + +# This section configures the Kubernetes Service for Dex. 
+service: + type: LoadBalancer + ports: + https: + port: 443 + # Disable HTTP port + http: + port: 5556 + +# Enable HTTPS support +https: + enabled: true + +# Mount the cert-manager generated certificate +volumes: +- name: tls-cert + secret: + secretName: dex-tls + +volumeMounts: +- name: tls-cert + mountPath: /etc/dex/tls + readOnly: true \ No newline at end of file diff --git a/docs/content/contributing/governance/general-technical-review.md b/docs/content/contributing/governance/general-technical-review.md index c28df003bbd..48d93098ee6 100644 --- a/docs/content/contributing/governance/general-technical-review.md +++ b/docs/content/contributing/governance/general-technical-review.md @@ -407,7 +407,7 @@ title: General Technical Review # the kubeconfig generated from this should be kept secure at all times - system:kcp:admin # the kubeconfig will be valid for 365d but will be automatically refreshed - validity: 8766h + validity: 8766h # 1 year secretRef: # the name of the secret that the assembled kubeconfig should be written to name: admin-kubeconfig diff --git a/docs/content/contributing/index.md b/docs/content/contributing/index.md index 037ba6d453c..fe7166191ce 100644 --- a/docs/content/contributing/index.md +++ b/docs/content/contributing/index.md @@ -104,7 +104,7 @@ do not hesitate to ping maintainers directly or ask on the project communication ### The Monorepo Structure The core `kcp-dev/kcp` repository is a monorepo containing the kcp core and some close to the core libraries. -See the [monorepo document](./monorepo/) for more details. +See the [monorepo document](./monorepo.md) for more details. When contributing to projects like `kcp-dev/apimachinery`, `kcp-dev/client-go`, and `kcp-dev/code-generator`, you must create a PR in the `kcp-dev/kcp` repository. Direct PRs to these repositories will be closed. 
diff --git a/docs/content/setup/.pages b/docs/content/setup/.pages index 2efb6e23697..eed8635d4f7 100644 --- a/docs/content/setup/.pages +++ b/docs/content/setup/.pages @@ -4,4 +4,5 @@ nav: - helm.md - kubectl-plugin.md - integrations.md - - production.md + - production + \ No newline at end of file diff --git a/docs/content/setup/production/.pages b/docs/content/setup/production/.pages new file mode 100644 index 00000000000..1cb80f3d6f7 --- /dev/null +++ b/docs/content/setup/production/.pages @@ -0,0 +1,8 @@ +title: Production Deployment +nav: + - index.md + - overview.md + - prerequisites.md + - kcp-dekker.md + - kcp-vespucci.md + - kcp-comer.md \ No newline at end of file diff --git a/docs/content/setup/production/dual-proxy.svg b/docs/content/setup/production/dual-proxy.svg new file mode 100644 index 00000000000..71f7aa0bea6 --- /dev/null +++ b/docs/content/setup/production/dual-proxy.svg @@ -0,0 +1,5 @@ + + +Shard1Provider WorkspaceKCPPublic OIDC Front ProxyShard2External trafficconsumer workspaceconsumer workspace APIExportEndpointSliceAPIExportAPIBindingAPIBindingshard1/urlshard2/urlThese might be 2 sets - public & privateExternal controller/providerInternal Front ProxyExternal trafficKubernetes cluster \ No newline at end of file diff --git a/docs/content/setup/production/high-level.svg b/docs/content/setup/production/high-level.svg new file mode 100644 index 00000000000..5596a2d1381 --- /dev/null +++ b/docs/content/setup/production/high-level.svg @@ -0,0 +1,5 @@ + + +Shard2Provider WorkspaceKCPFront ProxyAPIExportEndpointSliceshard1-urlshard2-urlShard1API-SyncagentMulticluster-runtimeUser / kubectlConsumer Workspace \ No newline at end of file diff --git a/docs/content/setup/production.md b/docs/content/setup/production/index.md similarity index 63% rename from docs/content/setup/production.md rename to docs/content/setup/production/index.md index 9ee7a3e3ff2..a1d64a117f6 100644 --- a/docs/content/setup/production.md +++ 
b/docs/content/setup/production/index.md @@ -1,11 +1,15 @@ --- description: > - Tips and notes for running a production-grade kcp setup. + Production-ready deployment guide for kcp with multiple certificate management strategies and deployment patterns. --- # Production Setup -This document collects notes and tips on how to run a production-grade kcp setup. +This document provides comprehensive guidance for deploying kcp in production environments with enterprise-grade reliability, security, and scalability. If you are looking for "hands-on" deployment instructions, please refer to the specific deployment variant guides linked below [#deployment-variants](#deployment-variants). + +!!! note + We are working on extending this documentation further, to include multiple site deployment, where individual shards are deployed in different regions. + This would allow for geo-distributed deployments to mimic real-world usage scenarios. ## Overview @@ -119,10 +123,63 @@ kcp client software does not by accident make false assumptions about sharding. ### High Availability To improve resilience against node failures, it is strongly recommended to not just spread the -workload across multiple shards, but also to ensure that shard pods are distributed across nodes or +kcp workspaces across multiple shards, but also to ensure that shard pods are distributed across nodes or availability zones. The same advice for etcd applies to kcp as well: Use anti-affinities to ensure pods are scheduled properly. ### Backups All kcp data is stored in etcd, there is no need to perform a dedicated kcp backup. 
+ +--- + +## Production Deployment Options + +For specific deployment instructions, kcp production deployments require careful consideration of: + +- **Certificate Management**: Self-signed, Let's Encrypt, or enterprise CA integration +- **High Availability**: Multi-shard deployment with proper load distribution +- **Network Architecture**: Front-proxy configuration and shard accessibility patterns +- **Security**: TLS encryption, RBAC, and authentication integration +- **Observability**: Monitoring, logging, and alerting + +## Deployment Variants + +We provide three reference deployment patterns: + +### [kcp-dekker](kcp-dekker.md) - Self-Signed Certificates +- **Best for**: Development, testing, or closed internal environments +- **Certificate approach**: All certificates are self-signed using an internal CA +- **Access pattern**: Only front-proxy is publicly accessible, shards are private +- **Network**: Simple single-cluster deployment + +### [kcp-vespucci](kcp-vespucci.md) - External Certificates +- **Best for**: Production environments requiring trusted certificates +- **Certificate approach**: Let's Encrypt for front-proxy with public shard access +- **Access pattern**: Both front-proxy and shards are publicly accessible +- **Network**: Multi-zone deployment with external certificate validation + +### [kcp-comer](kcp-comer.md) - Dual Front-Proxy +- **Best for**: Enterprise environments with CDN and edge requirements +- **Certificate approach**: CDN integration with edge re-encryption +- **Access pattern**: Dual front-proxy with CloudFlare integration +- **Network**: Complex multi-layer architecture with edge termination + +## Getting Started + +1. **[Prerequisites](prerequisites.md)**: Install shared components (etcd-druid, cert-manager, kcp-operator, OIDC) +2. **[Architecture Overview](overview.md)**: Understand kcp component communication patterns +3. 
**Choose Deployment**: Select the appropriate variant for your environment + +## Support Matrix + +| Feature | kcp-dekker | kcp-vespucci | kcp-comer | +|---------|------------|--------------|-----------| +| Self-signed certs | ✓ | - | ✓ | +| Let's Encrypt | - | ✓ | ✓ | +| Public shard access | - | ✓ | ✓ | +| CDN integration | - | - | ✓ | +| Multi-region | ✓ | ✓ | ✓ | +| OIDC authentication | ✓ | ✓ | ✓ | + +Choose the deployment that best matches your security, compliance, and operational requirements. \ No newline at end of file diff --git a/docs/content/setup/production/kcp-comer.md b/docs/content/setup/production/kcp-comer.md new file mode 100644 index 00000000000..00560b53402 --- /dev/null +++ b/docs/content/setup/production/kcp-comer.md @@ -0,0 +1,179 @@ +--- +description: > + Deploy kcp with dual front-proxy and edge re-encryption integration for enterprise environments requiring edge acceleration and advanced networking. +--- + +# kcp-comer: Dual Front-Proxy with Edge re-encryption + +The kcp-comer deployment pattern implements a dual front-proxy architecture with CDN integration, designed for enterprise environments requiring global performance, edge acceleration, and advanced networking capabilities. This can be adjusted to work with various CDN providers, but this guide focuses on CloudFlare integration. + +## Prerequisites + +Ensure all [shared components](prerequisites.md) are installed, plus: + +- **CloudFlare account** with API access +- **Custom domain** with CloudFlare DNS management + +## Architecture Diagram + +``` +Internet → CloudFlare Edge → Front-Proxy (External) → Shards +``` + +## Deployment Steps + +### 1. Configure CloudFlare Integration + +Set up CloudFlare for edge termination and routing: + +**CloudFlare CA Certificate**: Download the CloudFlare edge certificate for extended trust: + +!!! note + Verify this URL with CloudFlare documentation before production use. 
+ +```bash +kubectl create namespace kcp-comer +curl -L -o google-we1.pem https://ssl-tools.net/certificates/108fbf794e18ec5347a414e4370cc4506c297ab2.pem +kubectl create secret generic google-we1-ca --from-file=tls.crt=google-we1.pem -n kcp-comer +``` + +### 2. Create Namespace and etcd Certificates + +```bash +kubectl apply -f contrib/production/kcp-comer/certificate-etcd.yaml +``` + +### 2. Deploy etcd Clusters with Enhanced Configuration + +```bash +kubectl apply -f contrib/production/kcp-comer/etcd-druid-root.yaml +kubectl apply -f contrib/production/kcp-comer/etcd-druid-alpha.yaml +``` + +**Verify etcd deployment**: +```bash +kubectl get etcd -n kcp-comer +kubectl wait --for=condition=Ready etcd -n kcp-comer --all --timeout=300s +``` + +### 3. Configure kcp System Certificates + +Set up multi-tier certificate management: + +```bash +kubectl apply -f contrib/production/kcp-comer/certificate-kcp.yaml +``` + +### 4. Deploy Dual Front-Proxy Architecture + +Deploy external and internal front-proxy layers: + +```bash +# NOTE: These files need to be customized with your domain names before applying +kubectl apply -f contrib/production/kcp-comer/kcp-root-shard.yaml +kubectl apply -f contrib/production/kcp-comer/kcp-alpha-shard.yaml +kubectl apply -f contrib/production/kcp-comer/kcp-front-proxy.yaml +kubectl apply -f contrib/production/kcp-comer/kcp-front-proxy-internal.yaml +``` + +4.1. Get the LoadBalancer IP: +```bash +kubectl get svc -n kcp-comer +``` + +Configure DNS records in CloudFlare (or your chosen CDN). + +4.2 Verify DNS resolution: +```bash +nslookup api.comer.example.com +``` + +4.3 Verify deployment: +```bash +kubectl get pods -n kcp-comer +``` + +### CloudFlare Configuration: + +Configure your CloudFlare dashboard: + +1. **Set `api.comer` to "Proxied"** (orange cloud icon) +2. **Add Page Rule**: "Rewrite port to 6443" for the API domain +3. 
**Upload Custom CA** in SSL/TLS tab so CloudFlare trusts the internal certificate: + ```bash + kubectl get secret -n kcp-comer root-ca -o jsonpath='{.data.ca\.crt}' | base64 -d + ``` + +### 5. Verify External Access + +**Verify the front-proxy is accessible:** + ```bash + # Note: No 6443 due to rewrite via CloudFlare + curl -k https://api.comer.example.com/healthz + ``` + + +## Important: Certificate Authentication Limitation + +Due to CloudFlare's certificate re-encryption, certificate-based authentication through the public front-proxy **will not work**. The certificate presented to clients is CloudFlare's certificate, not the internal front-proxy certificate. + +### Install kubectl OIDC Plugin + +```bash +# Homebrew (macOS and Linux) +brew install kubelogin + +# Krew (macOS, Linux, Windows and ARM) +kubectl krew install oidc-login + +# Chocolatey (Windows) +choco install kubelogin + +# For other platforms, see: https://github.com/int128/kubelogin +``` + +**Solution**: Use OIDC authentication for external access. + +### 6. Create Admin Access and Test + +```bash +kubectl apply -f contrib/production/kcp-comer/kubeconfig-kcp-admin.yaml + +kubectl get secret -n kcp-comer kcp-admin-frontproxy \ + -o jsonpath='{.data.kubeconfig}' | base64 -d > kcp-admin-kubeconfig-comer.yaml + + +# If you test this now, it will not work due to note above. Lets configure OIDC first. + +KUBECONFIG=kcp-admin-kubeconfig-comer.yaml \ +kubectl config set-credentials oidc \ + --exec-api-version=client.authentication.k8s.io/v1beta1 \ + --exec-command=kubectl \ + --exec-arg=oidc-login \ + --exec-arg=get-token \ + --exec-arg=--oidc-issuer-url="https://auth.example.com" \ + --exec-arg=--oidc-client-id="platform-mesh" \ + --exec-arg=--oidc-extra-scope="email" \ + --exec-arg=--oidc-client-secret=Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + +# And this should redirect to OIDC login flow but fails to list with lack of permissions. 
+KUBECONFIG=kcp-admin-kubeconfig-comer.yaml kubectl get shards --user oidc +``` + +Test access using internal front-proxy: + +```bash +kubectl apply -f contrib/production/kcp-comer/kubeconfig-kcp-admin-internal.yaml +kubectl get secret -n kcp-comer kcp-admin-frontproxy-internal \ + -o jsonpath='{.data.kubeconfig}' | base64 -d > kcp-admin-kubeconfig-comer-internal.yaml + +KUBECONFIG=kcp-admin-kubeconfig-comer-internal.yaml kubectl get shards +``` + +**Expected output**: +``` +KUBECONFIG=kcp-admin-kubeconfig-comer-internal.yaml kubectl get shards 13:26:14 +NAME REGION URL EXTERNAL URL AGE +alpha https://alpha.comer.example.com:6443 https://api.comer.example.com:443 18m +root https://root.comer.example.com:6443 https://api.comer.example.com:443 21m +``` diff --git a/docs/content/setup/production/kcp-dekker.md b/docs/content/setup/production/kcp-dekker.md new file mode 100644 index 00000000000..f6a3fe2cd51 --- /dev/null +++ b/docs/content/setup/production/kcp-dekker.md @@ -0,0 +1,159 @@ +--- +description: > + Deploy kcp with self-signed certificates using the kcp-dekker pattern for development and internal environments. +--- + +# kcp-dekker: Self-Signed Certificate Deployment + +The kcp-dekker deployment pattern uses self-signed certificates and is ideal for development, testing, or closed internal environments where certificate trust can be managed centrally. + +## Architecture Overview + +- **Certificate approach**: All certificates are self-signed using an internal CA +- **Access pattern**: Only front-proxy is publicly accessible, shards are private +- **Network**: Simple single-cluster deployment with cluster-internal shard communication +- **DNS requirements**: Single public DNS record for the front-proxy endpoint + +## Prerequisites + +Ensure all [shared components](prerequisites.md) are installed before proceeding. 
+ +## DNS Configuration + +Create public DNS records for all endpoints: + +```bash +# Required DNS records +api.dekker.example.com → Front-proxy LoadBalancer IP +``` + +## Deployment Steps + +### 1. Create Namespace and etcd Certificates + +Create the deployment namespace and configure certificates for etcd clusters: + +```bash +kubectl create namespace kcp-dekker +kubectl apply -f contrib/production/kcp-dekker/certificate-etcd.yaml +``` + +### 2. Deploy etcd Clusters + +Deploy dedicated etcd clusters for root and alpha shards: + +```bash +kubectl apply -f contrib/production/kcp-dekker/etcd-druid-root.yaml +kubectl apply -f contrib/production/kcp-dekker/etcd-druid-alpha.yaml +``` + +**Verify etcd deployment**: +```bash +kubectl get etcd -n kcp-dekker +kubectl wait --for=condition=Ready etcd -n kcp-dekker --all --timeout=300s +``` + +### 3. Configure kcp System Certificates + +Set up certificates for kcp components using the internal CA: + +```bash +kubectl apply -f contrib/production/kcp-dekker/certificate-kcp.yaml +``` + +**Verify certificate issuance**: +```bash +kubectl get certificate -n kcp-dekker +``` + +### 4. Deploy KCP Components + +Deploy the kcp shards and front-proxy: + +```bash +# NOTE: These files needs to be customized with your domain names before applying +kubectl apply -f contrib/production/kcp-dekker/kcp-root-shard.yaml +kubectl apply -f contrib/production/kcp-dekker/kcp-alpha-shard.yaml +kubectl apply -f contrib/production/kcp-dekker/kcp-front-proxy.yaml +``` + +**Verify kcp deployment**: +```bash +kubectl get pods -n kcp-dekker +``` + +### 5. Configure DNS for Front-Proxy + +5.1. Get the LoadBalancer IP: + ```bash + kubectl get svc -n kcp-dekker frontproxy-front-proxy + ``` + +5.2. **Create DNS A record** pointing `api.dekker.example.com` to the LoadBalancer IP + +5.3. Verify DNS resolution: + ```bash + nslookup api.dekker.example.com + ``` + +5.4. 
Verify certificate issuance (may take a few minutes): + ```bash + kubectl get certificate -n kcp-dekker root-frontproxy-server -o yaml + ``` + +5.5. Verify the front-proxy is accessible: + ```bash + curl -k https://api.dekker.example.com:6443/healthz + ``` + +### 6. Create and Test Admin Access + +Generate admin kubeconfig and test cluster connectivity: + +```bash +kubectl apply -f contrib/production/kcp-dekker/kubeconfig-kcp-admin.yaml + +kubectl get secret -n kcp-dekker kcp-admin-frontproxy \ + -o jsonpath='{.data.kubeconfig}' | base64 -d > kcp-admin-kubeconfig-dekker.yaml + +KUBECONFIG=kcp-admin-kubeconfig-dekker.yaml kubectl get shards +``` + +**Expected output**: +``` +KUBECONFIG=kcp-admin-kubeconfig-dekker.yaml kubectl get shards 12:23:39 +NAME REGION URL EXTERNAL URL AGE +alpha https://alpha-shard-kcp.kcp-dekker.svc.cluster.local:6443 https://api.dekker.example.com:6443 10m +root https://root-kcp.kcp-dekker.svc.cluster.local:6443 https://api.dekker.example.com:6443 11m +``` + +### Install kubectl OIDC Plugin + +```bash +# Homebrew (macOS and Linux) +brew install kubelogin + +# Krew (macOS, Linux, Windows and ARM) +kubectl krew install oidc-login + +# Chocolatey (Windows) +choco install kubelogin + +# For other platforms, see: https://github.com/int128/kubelogin +``` + +### Configure OIDC Credentials + +```bash +kubectl config set-credentials oidc \ + --exec-api-version=client.authentication.k8s.io/v1beta1 \ + --exec-command=kubectl \ + --exec-arg=oidc-login \ + --exec-arg=get-token \ + --exec-arg=--oidc-issuer-url="https://auth.example.com" \ + --exec-arg=--oidc-client-id="platform-mesh" \ + --exec-arg=--oidc-extra-scope="email" \ + --exec-arg=--oidc-client-secret=Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== + +kubectl config set-context --current --user=oidc +``` \ No newline at end of file diff --git a/docs/content/setup/production/kcp-deployment-modes.svg b/docs/content/setup/production/kcp-deployment-modes.svg new file mode 100644 index 00000000000..cc8d6d93ea1 --- 
/dev/null +++ b/docs/content/setup/production/kcp-deployment-modes.svg @@ -0,0 +1,4 @@ + + + spec: baseURL: https://root-kcp.kcp-faros.svc.cluster.local:6443 externalURL: https://root-kcp.kcp-faros.svc.cluster.local:6443 virtualWorkspaceURL: https://10.42.1.151:6443if wShard.Spec.ExternalURL == "" { wShard.Spec.ExternalURL = wShard.Spec.BaseURL}if wShard.Spec.VirtualWorkspaceURL == "" { wShard.Spec.VirtualWorkspaceURL = wShard.Spec.BaseURL}--shard-virtual-workspace-url--shard-external-url--shard-base-url \ No newline at end of file diff --git a/docs/content/setup/production/kcp-vespucci.md b/docs/content/setup/production/kcp-vespucci.md new file mode 100644 index 00000000000..2d40de25d90 --- /dev/null +++ b/docs/content/setup/production/kcp-vespucci.md @@ -0,0 +1,164 @@ +--- +description: > + Deploy kcp with external certificates using Let's Encrypt for production environments requiring trusted certificates. +--- + +# kcp-vespucci: External Certificate Deployment + +The kcp-vespucci deployment pattern uses external certificates (Let's Encrypt) and is ideal for production environments where trusted certificates are required and both front-proxy and shards need public accessibility. + +## Architecture Overview + +- **Certificate approach**: Let's Encrypt for automatic certificate management +- **Access pattern**: Both front-proxy and shards are publicly accessible +- **Network**: Multi-zone deployment with external certificate validation +- **DNS requirements**: Multiple public DNS records for front-proxy and each shard + +## Prerequisites + +Ensure all [shared components](prerequisites.md) are installed before proceeding. + +**Additional requirements for kcp-vespucci:** +- Public DNS domain with ability to create multiple A records +- LoadBalancer service capability for multiple endpoints +- Let's Encrypt ACME challenge capability (HTTP-01 or DNS-01) + +## Deployment Steps + +### 1. 
Create DNS Records + +Create public DNS records for all endpoints: + +```bash +# Required DNS records +api.vespucci.example.com → Front-proxy LoadBalancer IP +root.vespucci.example.com → Root shard LoadBalancer IP +alpha.vespucci.example.com → Alpha shard LoadBalancer IP +``` + +!!! note + DNS records must be configured before certificate issuance begins. + +### 2. Create Namespace + +```bash +kubectl create namespace kcp-vespucci +kubectl apply -f contrib/production/kcp-vespucci/certificate-etcd.yaml +``` + +**Verify issuer is ready**: + +This was part of prerequisites but double-check. + +```bash +kubectl get clusterissuer letsencrypt-prod +``` + +### 3. Deploy etcd Clusters + +Deploy etcd clusters with external certificate support: + +```bash +kubectl apply -f contrib/production/kcp-vespucci/etcd-druid-root.yaml +kubectl apply -f contrib/production/kcp-vespucci/etcd-druid-alpha.yaml +``` + +**Verify etcd clusters**: +```bash +kubectl get etcd -n kcp-vespucci +kubectl wait --for=condition=Ready etcd -n kcp-vespucci --all --timeout=300s +``` + +### 4. Configure KCP System Certificates + +Set up certificates for kcp components using the internal CA: + +```bash +kubectl apply -f contrib/production/kcp-vespucci/certificate-kcp.yaml +``` + +**Verify certificate issuance**: +```bash +kubectl get certificate -n kcp-vespucci +``` + +### 5. Deploy KCP Components with External Access + +Because we use Let's Encrypt, and since kubectl needs explicit CA configuration, we need to deploy kcp components with extended CA bundle trust. This might be different in your environment. 
+ +```bash +curl -L -o isrgrootx1.pem https://letsencrypt.org/certs/isrgrootx1.pem +kubectl create secret generic letsencrypt-ca --from-file=tls.crt=isrgrootx1.pem -n kcp-vespucci +``` + +Deploy kcp components configured for public shard access: + +```bash +# NOTE: These files need to be customized with your domain names before applying +kubectl apply -f contrib/production/kcp-vespucci/kcp-root-shard.yaml +kubectl apply -f contrib/production/kcp-vespucci/kcp-alpha-shard.yaml +kubectl apply -f contrib/production/kcp-vespucci/kcp-front-proxy.yaml +``` + +**Verify deployment**: +```bash +kubectl get pods -n kcp-vespucci +``` + +### 6. Verify LoadBalancer Services + +Ensure all required LoadBalancer services are provisioned: + +```bash +kubectl get svc -n kcp-vespucci -o wide +``` + +**Expected services**: +``` +NAME TYPE EXTERNAL-IP PORT(S) AGE +frontproxy-front-proxy LoadBalancer 203.0.113.10 6443:30001/TCP 5m +root-kcp LoadBalancer 203.0.113.11 6443:30002/TCP 5m +alpha-shard-kcp LoadBalancer 203.0.113.12 6443:30003/TCP 5m +``` + +### 7. Update DNS Records with LoadBalancer IPs + +Update your DNS records with the actual LoadBalancer IP addresses: + +```bash +# Get LoadBalancer IPs (or CNAMEs if using DNS-based LoadBalancers) +kubectl get svc -n kcp-vespucci frontproxy-front-proxy -o jsonpath='{.status.loadBalancer}' +kubectl get svc -n kcp-vespucci root-kcp -o jsonpath='{.status.loadBalancer}' +kubectl get svc -n kcp-vespucci alpha-shard-kcp -o jsonpath='{.status.loadBalancer}' +``` + +**Verify DNS propagation**: +```bash +nslookup api.vespucci.example.com +nslookup root.vespucci.example.com +nslookup alpha.vespucci.example.com +``` + +Verify the front-proxy is accessible: +```bash +curl -k https://api.vespucci.example.com:6443/healthz +``` + +### 8. 
Create Admin Access and Test Connectivity + +```bash +kubectl apply -f contrib/production/kcp-vespucci/kubeconfig-kcp-admin.yaml + +kubectl get secret -n kcp-vespucci kcp-admin-frontproxy \ + -o jsonpath='{.data.kubeconfig}' | base64 -d > kcp-admin-kubeconfig-vespucci.yaml + +KUBECONFIG=kcp-admin-kubeconfig-vespucci.yaml kubectl get shards +``` + +**Expected output**: +``` +KUBECONFIG=kcp-admin-kubeconfig-vespucci.yaml kubectl get shards +NAME REGION URL EXTERNAL URL AGE +alpha https://alpha.vespucci.example.com:6443 https://api.vespucci.example.com:6443 7m46s +root https://root.vespucci.example.com:6443 https://api.vespucci.example.com:6443 9m23s +``` diff --git a/docs/content/setup/production/overview.md b/docs/content/setup/production/overview.md new file mode 100644 index 00000000000..21f49032c69 --- /dev/null +++ b/docs/content/setup/production/overview.md @@ -0,0 +1,136 @@ +--- +description: > + Understanding kcp component communication patterns and architecture for production deployments. +--- + +# Architecture Overview + +Understanding kcp's component communication patterns is essential for designing production deployments. This guide explains how different kcp components interact and the network requirements for each deployment pattern. + +## kcp Component Communication + +In general, shards do not communicate directly with each other; all communication is proxied via the front-proxy or cache server. + +### Front-Proxy +The main API endpoint for clients to access kcp. This is the main entry point for all external consumer clients. + +**Communication patterns:** +- Shards do not communicate directly with the front-proxy, except in two cases: + + 1. **Workspace scheduling**: When a new workspace is scheduled, the shard contacts the front-proxy to randomly pick a shard for the new workspace + + 2. 
**Endpoint updates**: When an `APIExportEndpointSlice` or `CachedResourceEndpointSlice` URL is updated, the update happens via the front-proxy + +**Configuration:** +- Set `--externalHostname` or `spec.external.hostname` in front-proxy or shard configurations + +### Shards +Individual kcp shards that host workspaces. Shards can be exposed publicly or kept private. And by public, we mean accessible from outside the cluster network. +This means that clients like `kubectl` or `kcp` CLI can access the shard directly if needed and interact with workspaces hosted on that shard from outside the cluster network. + +**Configuration:** +- Set `spec.shardBaseURL` in the shard spec, or `--shard-base-url` flag in the shard deployment +- This URL exposes the main shard API server endpoint that the front-proxy uses to communicate with the shard + +### Virtual Workspaces +kcp supports running virtual workspaces outside shards, but the recommended approach is to run virtual workspaces inside shards. + +**Configuration:** +- Separate flag: `--virtual-workspace-base-url` (defaults to `spec.shardBaseURL`) +- External virtual workspace clients need access to these URLs + +## URL Configuration Examples + +After deployment, you can verify the configuration by checking shard objects: + +```bash +kubectl get shards +``` + +Output example: +``` +NAME REGION URL EXTERNAL URL AGE +alpha https://alpha.comer.example.com:6443 https://api.comer.example.com:443 6d20h +root https://root.comer.example.com:6443 https://api.comer.example.com:443 6d20h +``` + +Detailed shard specification: +```bash +kubectl get shards -o yaml | grep spec -A 3 +``` + +Output example: +```yaml +spec: + baseURL: https://alpha.comer.example.com:6443 + externalURL: https://api.comer.example.com:443 + virtualWorkspaceURL: https://alpha.comer.example.com:6443 +-- +spec: + baseURL: https://root.comer.example.com:6443 + externalURL: https://api.comer.example.com:443 + virtualWorkspaceURL: https://root.comer.example.com:6443 
+``` + +## Virtual Workspace Endpoints + +The `virtualWorkspaceURL` is used to construct `VirtualWorkspace` endpoints. External virtual workspace clients need access to these URLs. + +Example endpoint slice: +```bash +KUBECONFIG=kcp-admin-kubeconfig.yaml kubectl get apiexportendpointslice tenancy.kcp.io -o yaml | grep endpoints -A 2 +``` + +Output: +```yaml +endpoints: + - url: https://root.comer.example.com:6443/services/apiexport/root/tenancy.kcp.io + - url: https://alpha.comer.example.com:6443/services/apiexport/alpha/tenancy.kcp.io +``` + +External clients like `syncer` must be able to access these URLs. + +## URL Defaulting Logic + +The system applies the following defaulting logic: + +```go +if shard.Spec.ExternalURL == "" { + shard.Spec.ExternalURL = shard.Spec.BaseURL +} + +if shard.Spec.VirtualWorkspaceURL == "" { + shard.Spec.VirtualWorkspaceURL = shard.Spec.BaseURL +} +``` + +## High-Level Architecture + +## Network Requirements by Deployment Type + +### kcp-dekker (Self-Signed) +- **Public access**: Front-proxy only +- **Private network**: Shards communicate via cluster-internal DNS +- **Certificate trust**: Clients need CA certificate in trust store + +![](self-signed.svg) + + +### kcp-vespucci (External Certs) +- **Public access**: Front-proxy and shards +- **External DNS**: Public DNS records for all endpoints +- **Certificate validation**: Automatic via Let's Encrypt + +![](public.svg) + +### kcp-comer (Dual Front-Proxy) +- **Public access**: CDN edge and front-proxy +- **Edge encryption**: CloudFlare integration +- **Certificate management**: Mixed (edge + internal) + +In this scenario we have two front-proxy. One secured by CloudFlare, but working only with OIDC auth, and another internal front-proxy +secured by an internal CA for internal clients. + +![](dual-proxy.svg) + +Understanding these patterns will help you choose the appropriate deployment strategy and configure networking correctly for your environment. 
\ No newline at end of file diff --git a/docs/content/setup/production/prerequisites.md b/docs/content/setup/production/prerequisites.md new file mode 100644 index 00000000000..cc4bf9d2a70 --- /dev/null +++ b/docs/content/setup/production/prerequisites.md @@ -0,0 +1,211 @@ +--- +description: > + Shared components and prerequisites required for all kcp production deployments. +--- + +# Prerequisites + +Before deploying any kcp production variant, you must install shared components that all deployments depend on. This guide covers the installation and configuration of these foundational components. + +- A Kubernetes cluster with sufficient resources +- `kubectl` configured to access your cluster +- `helm` CLI tool installed +- DNS management capability (manual or automated) +- (Optional) CloudFlare account for DNS01 challenges + +## Required Components + +All kcp production deployments require: + +1. **etcd-druid operator** - Database storage management +2. **cert-manager** - Certificate lifecycle management +3. **kcp-operator** - kcp resource lifecycle management +4. **OIDC provider (dex)** - Authentication services +5. **DNS configuration** - Domain name resolution +6. **LoadBalancer service** - External traffic routing + +## Installation Steps + +### 1. etcd-druid Operator + +etcd-druid manages etcd clusters for kcp shards with automated backup, restore, and scaling capabilities. + +```bash +# Install etcd-druid operator +helm install etcd-druid oci://europe-docker.pkg.dev/gardener-project/releases/charts/gardener/etcd-druid \ + --namespace etcd-druid \ + --create-namespace \ + --version v0.33.0 +``` + +### Install Required CRDs + +**Known Issue**: The etcd-druid chart doesn't install CRDs automatically. Install them manually: +([Issue #1185](https://github.com/gardener/etcd-druid/issues/1185)). +Once #1185 is released, this step can be skipped. 
+ +```bash +kubectl apply -f contrib/production/etcd-druid/etcdcopybackupstasks.druid.gardener.cloud.yaml +kubectl apply -f contrib/production/etcd-druid/etcds.druid.gardener.cloud.yaml +``` + +### 2. cert-manager + +cert-manager automates certificate management for TLS encryption throughout the kcp deployment. + +```bash +helm repo add jetstack https://charts.jetstack.io +helm repo update + +helm upgrade \ + --install \ + --namespace cert-manager \ + --create-namespace \ + --version v1.18.2 \ + --set crds.enabled=true \ + --atomic \ + cert-manager jetstack/cert-manager +``` + +Optional: + +We are going to use the CloudFlare DNS01 challenge solver for Let's Encrypt certificates in some deployment variants. If you plan to use CloudFlare, install the cert-manager CloudFlare DNS01 solver: + +```bash +cp contrib/production/cert-manager/cluster-issuer.yaml.template kcp/assets/cert-manager/cluster-issuer.yaml +# Edit kcp/assets/cert-manager/cluster-issuer.yaml to add your Email. +kubectl apply -f kcp/assets/cert-manager/cluster-issuer.yaml + +cp contrib/production/cert-manager/cloudflare-secret.yaml.template kcp/assets/cert-manager/cloudflare-secret.yaml +# Edit kcp/assets/cert-manager/cloudflare-secret.yaml to add your CloudFlare API token. +kubectl apply -f kcp/assets/cert-manager/cloudflare-secret.yaml +``` + +### 3. kcp-operator + +The kcp-operator manages kcp resource lifecycle and ensures proper configuration. + +```bash +helm repo add kcp https://kcp-dev.github.io/helm-charts + +helm upgrade --install \ + --create-namespace \ + --namespace kcp-operator \ + kcp-operator kcp/kcp-operator +``` + +### 4. (Optional) OIDC Provider (Dex) + +If you have an existing OIDC provider, you can skip this section. This guide uses Dex as the OIDC provider with PostgreSQL as the backend database. + +### 4.1. 
Install PostgreSQL Operator + +Create the OIDC namespace and install CloudNative PostgreSQL operator: + +```bash +kubectl create namespace oidc + +kubectl apply --server-side -f \ + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml +``` + +### 4.2. Deploy PostgreSQL Database + +Create a PostgreSQL cluster and database for Dex: + +```bash +kubectl apply -f contrib/production/oidc-dex/postgres-cluster.yaml +kubectl apply -f contrib/production/oidc-dex/postgres-database.yaml +``` + +### 4.3 Configure Dex Certificates + +Request a certificate for Dex from cert-manager: + +```bash +kubectl apply -f contrib/production/oidc-dex/certificate-dns.yaml +``` + +### 4.4 Deploy Dex + +```bash +# Check certificate status +kubectl get certificate -n oidc + +# Once the certificate is ready, generate Dex values file +cp contrib/production/oidc-dex/values.yaml.template contrib/production/oidc-dex/values.yaml +# Edit contrib/production/oidc-dex/values.yaml to set the correct domain name for Dex and database credentials. + +helm repo add dex https://charts.dexidp.io + +helm upgrade -i dex dex/dex \ + --create-namespace \ + --namespace oidc \ + -f contrib/production/oidc-dex/values.yaml +``` + +### 5. DNS Configuration + +Each deployment type requires specific DNS records. 
The exact requirements depend on your chosen variant: + +#### kcp-dekker (Self-Signed) +``` +api.dekker.example.com → LoadBalancer IP +``` + +#### kcp-vespucci (External Certs) +``` +api.vespucci.example.com → LoadBalancer IP +root.vespucci.example.com → LoadBalancer IP +alpha.vespucci.example.com → LoadBalancer IP +``` + +#### kcp-comer (Dual Front-Proxy) +``` +api.comer.example.com → CDN/LoadBalancer IP +root.comer.example.com → Internal LoadBalancer IP +alpha.comer.example.com → Internal LoadBalancer IP +``` + +## Verification Steps + +After installing all prerequisites, verify the installation: + +```bash +# Check all required namespaces exist +kubectl get namespaces | grep -E "(cert-manager|etcd-druid|kcp-operator|dex)" + +# Verify operators are running +kubectl get pods -A | grep -E "(cert-manager|etcd-druid|kcp-operator|dex)" +``` + +## Resource Requirements + +Minimum recommended resources for shared components: + +| Component | CPU | Memory | Storage | +|-----------|-----|--------|---------| +| etcd-druid | 100m | 128Mi | - | +| cert-manager | 100m | 128Mi | - | +| kcp-operator | 100m | 128Mi | - | +| dex | 100m | 64Mi | - | +| **Total** | **400m** | **448Mi** | - | + +## Security Considerations + +1. **Network policies**: Implement network policies to restrict communication between components +2. **RBAC**: Configure minimal required permissions for each component +3. **Secret management**: Use external secret management systems in production +4. **Certificate rotation**: Configure automatic certificate rotation policies +5. **Backup encryption**: Ensure etcd backups are encrypted at rest + +## Next Steps + +Once all prerequisites are installed and verified: + +1. Choose your deployment variant: + - [kcp-dekker](kcp-dekker.md) - Self-signed certificates + - [kcp-vespucci](kcp-vespucci.md) - External certificates + - [kcp-comer](kcp-comer.md) - Dual front-proxy + +2. 
Follow the specific deployment guide for your chosen variant diff --git a/docs/content/setup/production/public.svg b/docs/content/setup/production/public.svg new file mode 100644 index 00000000000..4400cbb947f --- /dev/null +++ b/docs/content/setup/production/public.svg @@ -0,0 +1,4 @@ + + +Shard1Provider WorkspaceKCPFront ProxyShard2External trafficconsumer workspaceconsumer workspace APIExportEndpointSliceAPIExportAPIBindingAPIBindingshard1/urlshard2/urlThese publically accessibleShard base URL PUBLICShard base URL PUBLICKubernetes clusterExternal controller/provider \ No newline at end of file diff --git a/docs/content/setup/production/self-signed.svg b/docs/content/setup/production/self-signed.svg new file mode 100644 index 00000000000..726d5dce7c4 --- /dev/null +++ b/docs/content/setup/production/self-signed.svg @@ -0,0 +1,4 @@ + + +Shard1Provider WorkspaceKCPFront ProxyShard2External trafficconsumer workspaceconsumer workspace APIExportEndpointSliceAPIExportAPIBindingAPIBindingshard1/urlshard2/urlInternal to platfrom/deploymentInternal controlelrsExternal controller/providerNot POSSIBLEKubernetes cluster \ No newline at end of file diff --git a/docs/scripts/serve-docs.sh b/docs/scripts/serve-docs.sh index 51647edfd4f..f9c2857420b 100755 --- a/docs/scripts/serve-docs.sh +++ b/docs/scripts/serve-docs.sh @@ -35,4 +35,8 @@ fi MIKE_OPTIONS+=(--ignore-remote-status) mike set-default "${MIKE_OPTIONS[@]}" --allow-undefined main -mike serve "${MIKE_OPTIONS[@]}" +if [[ -n "${DEV_MODE:-}" ]]; then + mkdocs serve --dev-addr=127.0.0.1:8000 --livereload +else + mike serve "${MIKE_OPTIONS[@]}" +fi From 4d93816e15079b0e326f9406a45ad0381e75fcc4 Mon Sep 17 00:00:00 2001 From: Mangirdas Judeikis Date: Wed, 26 Nov 2025 10:55:27 +0200 Subject: [PATCH 2/2] reviews updates --- contrib/production/cert-manager/certificate-example.yaml | 2 +- contrib/production/oidc-dex/certificate-dns.yaml | 2 +- docs/content/setup/production/index.md | 2 +- docs/content/setup/production/kcp-comer.md 
| 3 ++- docs/content/setup/production/kcp-dekker.md | 3 +++ 5 files changed, 8 insertions(+), 4 deletions(-) diff --git a/contrib/production/cert-manager/certificate-example.yaml b/contrib/production/cert-manager/certificate-example.yaml index a8a86b7b381..15e1a186975 100644 --- a/contrib/production/cert-manager/certificate-example.yaml +++ b/contrib/production/cert-manager/certificate-example.yaml @@ -10,7 +10,7 @@ spec: # Reference to the ClusterIssuer issuerRef: - name: kcp-comerletsencrypt-prod + name: letsencrypt-prod kind: ClusterIssuer # Domains for the certificate diff --git a/contrib/production/oidc-dex/certificate-dns.yaml b/contrib/production/oidc-dex/certificate-dns.yaml index 4b11ed13dc9..4a91016a5a3 100644 --- a/contrib/production/oidc-dex/certificate-dns.yaml +++ b/contrib/production/oidc-dex/certificate-dns.yaml @@ -7,7 +7,7 @@ metadata: spec: secretName: dex-tls issuerRef: - name: kcp-comerletsencrypt-prod + name: letsencrypt-prod kind: ClusterIssuer group: cert-manager.io dnsNames: diff --git a/docs/content/setup/production/index.md b/docs/content/setup/production/index.md index a1d64a117f6..48a83a8628e 100644 --- a/docs/content/setup/production/index.md +++ b/docs/content/setup/production/index.md @@ -155,7 +155,7 @@ We provide three reference deployment patterns: ### [kcp-vespucci](kcp-vespucci.md) - External Certificates - **Best for**: Production environments requiring trusted certificates -- **Certificate approach**: Let's Encrypt for front-proxy with public shard access +- **Certificate approach**: Let's Encrypt for front-proxy, self-signed certificates for shards - **Access pattern**: Both front-proxy and shards are publicly accessible - **Network**: Multi-zone deployment with external certificate validation diff --git a/docs/content/setup/production/kcp-comer.md b/docs/content/setup/production/kcp-comer.md index 00560b53402..90f73b1ebc3 100644 --- a/docs/content/setup/production/kcp-comer.md +++ b/docs/content/setup/production/kcp-comer.md 
@@ -132,7 +132,8 @@ choco install kubelogin # For other platforms, see: https://github.com/int128/kubelogin ``` -**Solution**: Use OIDC authentication for external access. +To authenticate with the kcp-comer deployment from outside, you must use OIDC authentication. + ### 6. Create Admin Access and Test diff --git a/docs/content/setup/production/kcp-dekker.md b/docs/content/setup/production/kcp-dekker.md index f6a3fe2cd51..34c7d97807c 100644 --- a/docs/content/setup/production/kcp-dekker.md +++ b/docs/content/setup/production/kcp-dekker.md @@ -156,4 +156,7 @@ kubectl config set-credentials oidc \ --exec-arg=--oidc-client-secret=Z2Fyc2lha2FsYmlzdmFuZGVuekWplCg== kubectl config set-context --current --user=oidc + +# This should redirect to the OIDC login flow but fail to list shards due to a lack of permissions. +KUBECONFIG=kcp-admin-kubeconfig-dekker.yaml kubectl get shards --user oidc ``` \ No newline at end of file