diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 376c819e..9e3d76ac 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -78,7 +78,9 @@ jobs: uses: actions/checkout@v4 - name: Build images locally - run: make provisioner-localpv-image + run: | + make provisioner-localpv-image + make pvc-manager-image - name: Setup Minikube-Kubernetes uses: medyagh/setup-minikube@latest @@ -205,9 +207,116 @@ jobs: DBUILD_SITE_URL=https://openebs.io BRANCH=${{ env.BRANCH }} + pvc-manager: + runs-on: ubuntu-latest + needs: ['integration-test'] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Go 1.19 + uses: actions/setup-go@v5 + with: + go-version: 1.19.13 + + - name: Set Image Org + # sets the default IMAGE_ORG to openebs + run: | + [ -z "${{ secrets.IMAGE_ORG }}" ] && IMAGE_ORG=openebs || IMAGE_ORG=${{ secrets.IMAGE_ORG}} + echo "IMAGE_ORG=${IMAGE_ORG}" >> $GITHUB_ENV + + - name: Set CI Tag + run: | + BRANCH=${{ github.ref_name }} + echo "BRANCH=$BRANCH" >> $GITHUB_ENV + CI_TAG=$(awk -F': ' '/^version:/ {print $2}' deploy/helm/charts/Chart.yaml) + echo "TAG=${CI_TAG}" >> $GITHUB_ENV + + - name: Set Build Date + id: date + run: | + echo "DATE=$(date -u +'%Y-%m-%dT%H:%M:%S%Z')" >> $GITHUB_OUTPUT + + - name: Docker meta + id: docker_meta + uses: docker/metadata-action@v4 + with: + # add each registry to which the image needs to be pushed here + images: | + ${{ env.IMAGE_ORG }}/pvc-manager + quay.io/${{ env.IMAGE_ORG }}/pvc-manager + ghcr.io/${{ env.IMAGE_ORG }}/pvc-manager + tags: | + type=raw,value=latest,enable=false + type=raw,value=${{ env.TAG }} + + - name: Print Tag info + run: | + echo "BRANCH: ${{ env.BRANCH }}" + echo "${{ steps.docker_meta.outputs.tags }}" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + with: + platforms: all + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + with: + version: v0.5.1 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to Quay + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_TOKEN }} + + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Build pvc-manager binary + run: make pvc-manager + + - name: Copy pvc-manager binary to build context + run: cp bin/pvc-manager/pvc-manager ./ + + - name: Build and Push multi-arch Image + uses: docker/build-push-action@v4 + with: + context: . 
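+          # the pvc-manager binary was copied into the build context root in the step above, so the Dockerfile's COPY can pick it up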
+ file: ./buildscripts/pvc-manager/Dockerfile + push: true + load: false + platforms: linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le + tags: | + ${{ steps.docker_meta.outputs.tags }} + build-args: | + DBUILD_DATE=${{ steps.date.outputs.DATE }} + DBUILD_REPO_URL=https://github.com/openebs/dynamic-localpv-provisioner + DBUILD_SITE_URL=https://openebs.io + BRANCH=${{ env.BRANCH }} + + - name: Clean up pvc-manager binary + run: rm -f ./pvc-manager + release-chart: runs-on: ubuntu-latest - needs: ['provisioner-localpv'] + needs: ['provisioner-localpv', 'pvc-manager'] steps: - uses: actions/checkout@v4 - name: Publish provisioner-localpv develop or prerelease helm chart diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 3d7f6b33..25a9f106 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -93,7 +93,9 @@ jobs: uses: actions/checkout@v4 - name: Build images locally - run: make provisioner-localpv-image + run: | + make provisioner-localpv-image + make pvc-manager-image - name: Setup Minikube-Kubernetes uses: medyagh/setup-minikube@latest @@ -153,3 +155,38 @@ jobs: push: false load: false platforms: linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le + + pvc-manager: + runs-on: ubuntu-latest + needs: ['integration-test'] + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v5 + with: + go-version: 1.19.13 + + - name: Setup QEMU + uses: docker/setup-qemu-action@v2 + with: + platforms: all + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + with: + version: v0.5.1 + + - name: Checkout + uses: actions/checkout@v4 + + - name: Build pvc-manager binary + run: make pvc-manager + + - name: Build multi-arch Image + uses: docker/build-push-action@v4 + with: + context: . 
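+          # NOTE: the Dockerfile COPYs ./pvc-manager from the build context root; this job only runs `make pvc-manager`,
+          # so the binary may also need to be copied from bin/pvc-manager/ into the context root, as build_and_push.yml does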
+ file: ./buildscripts/pvc-manager/Dockerfile + push: false + load: false + platforms: linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1816bf96..cb007486 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -91,10 +91,35 @@ jobs: QY: quay.io/${{ env.IMAGE_ORG }}/provisioner-localpv:${{ env.VERSION }} run: | nix-shell --pure --run "crane copy --platform all ${{ env.SRC }} ${{ env.QY }}" + + - name: Mirror pvc-manager multi-arch image with crane to Dockerhub + env: + SRC: ghcr.io/${{ env.IMAGE_ORG }}/dev/pvc-manager:${{ env.VERSION }} + DH: docker.io/${{ env.IMAGE_ORG }}/pvc-manager:${{ env.VERSION }} + run: | + nix-shell --pure --run "crane copy --platform all ${{ env.SRC }} ${{ env.DH }}" ./shell.nix + - name: Mirror pvc-manager multi-arch image with crane to Github + env: + SRC: ghcr.io/${{ env.IMAGE_ORG }}/dev/pvc-manager:${{ env.VERSION }} + GH: ghcr.io/${{ env.IMAGE_ORG }}/pvc-manager:${{ env.VERSION }} + run: | + nix-shell --pure --run "crane copy --platform all ${{ env.SRC }} ${{ env.GH }}" ./shell.nix + + - name: Mirror pvc-manager multi-arch image with crane to Quay + env: + SRC: ghcr.io/${{ env.IMAGE_ORG }}/dev/pvc-manager:${{ env.VERSION }} + QY: quay.io/${{ env.IMAGE_ORG }}/pvc-manager:${{ env.VERSION }} + run: | + nix-shell --pure --run "crane copy --platform all ${{ env.SRC }} ${{ env.QY }}" + - name: Update the registry and the repository run: | nix-shell --pure --run "./scripts/update-reg-repo.sh --registry docker.io/ --repository ${{ env.IMAGE_ORG }}/provisioner-localpv" ./shell.nix + + - name: Update the registry and the repository for pvc-manager + run: | + nix-shell --pure --run "./scripts/update-reg-repo.sh --registry docker.io/ --repository ${{ env.IMAGE_ORG }}/pvc-manager --component pvc-manager" ./shell.nix - name: Publish chart via helm-gh-pages uses: stefanprodan/helm-gh-pages@master diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 82213237..79541fd0 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -81,7 +81,9 @@ jobs: uses: actions/checkout@v4 - name: Build images locally - run: make provisioner-localpv-image || exit 1; + run: | + make provisioner-localpv-image || exit 1; + make pvc-manager-image || exit 1; - name: Setup Minikube-Kubernetes uses: medyagh/setup-minikube@latest @@ -194,9 +196,94 @@ jobs: DBUILD_SITE_URL=https://openebs.io BRANCH=${{ env.BRANCH }} + pvc-manager: + needs: ['integration-test'] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - uses: cachix/install-nix-action@v22 + - name: Pre-populate nix-shell + run: | + export NIX_PATH=nixpkgs=$(jq '.nixpkgs.url' nix/sources.json -r) + echo "NIX_PATH=$NIX_PATH" >> $GITHUB_ENV + nix-shell --pure --run "echo" ./shell.nix + + - name: Check if the chart is publishable + run: | + nix-shell --pure --run "./scripts/update-chart-version.sh --tag ${TAG} --publish-release" ./shell.nix + + - name: Set Image Org + run: | + [ -z "${{ secrets.IMAGE_ORG }}" ] && IMAGE_ORG=openebs || IMAGE_ORG=${{ secrets.IMAGE_ORG}} + echo "IMAGE_ORG=${IMAGE_ORG}" >> $GITHUB_ENV + + - name: Set IMAGE_TAG and BRANCH + run: | + BRANCH=${{ github.ref_name }} + echo "BRANCH=$BRANCH" >> $GITHUB_ENV + echo "IMAGE_TAG=$(awk -F': ' '/^version:/ {print $2}' deploy/helm/charts/Chart.yaml)" >> $GITHUB_ENV + + - name: Set Build Date + id: date + run: | + echo "DATE=$(date -u +'%Y-%m-%dT%H:%M:%S%Z')" >> $GITHUB_OUTPUT + + - name: 
Docker meta + id: docker_meta + uses: docker/metadata-action@v4 + with: + images: | + ghcr.io/${{ env.IMAGE_ORG }}/dev/pvc-manager + tags: | + type=raw,value=latest,enable=false + type=raw,value=${{ env.IMAGE_TAG }} + + - name: Print Tag info + run: | + echo "BRANCH: ${{ env.BRANCH }}" + echo "RELEASE_TAG: ${{ steps.docker_meta.outputs.tags }}" + + - name: Setup QEMU + uses: docker/setup-qemu-action@v2 + with: + platforms: all + + - name: Setup Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + with: + version: v0.5.1 + + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build pvc-manager binary + run: make pvc-manager + + - name: Build and Push Image + uses: docker/build-push-action@v4 + with: + context: . + file: ./buildscripts/pvc-manager/Dockerfile + push: true + platforms: linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le + tags: | + ${{ steps.docker_meta.outputs.tags }} + build-args: | + DBUILD_DATE=${{ steps.date.outputs.DATE }} + DBUILD_REPO_URL=https://github.com/openebs/dynamic-localpv-provisioner + DBUILD_SITE_URL=https://openebs.io + BRANCH=${{ env.BRANCH }} + release-chart: runs-on: ubuntu-latest - needs: ["provisioner-localpv"] + needs: ["provisioner-localpv", "pvc-manager"] steps: - uses: actions/checkout@v4 @@ -220,6 +307,10 @@ jobs: run: | nix-shell --pure --run "./scripts/update-reg-repo.sh --registry ghcr.io/ --repository ${{ env.IMAGE_ORG }}/dev/provisioner-localpv" ./shell.nix + - name: Update the registry and repository for pvc-manager in values.yaml + run: | + nix-shell --pure --run "./scripts/update-reg-repo.sh --registry ghcr.io/ --repository ${{ env.IMAGE_ORG }}/dev/pvc-manager --component pvc-manager" ./shell.nix + - name: Set Chart Version run: | TAG=$(awk -F': ' '/^version:/ {print $2}' deploy/helm/charts/Chart.yaml) diff --git a/Makefile b/Makefile index 4fbde116..5853e6a4 100644 --- a/Makefile +++ b/Makefile @@ -62,12 +62,15 @@ endif # Specify the name for the binaries PROVISIONER_LOCALPV=provisioner-localpv +PVC_MANAGER=pvc-manager # Specify the name of the image PROVISIONER_LOCALPV_IMAGE?=provisioner-localpv +PVC_MANAGER_IMAGE?=pvc-manager # Final variable with image org, name and tag PROVISIONER_LOCALPV_IMAGE_TAG=${IMAGE_ORG}/${PROVISIONER_LOCALPV_IMAGE}:${TAG} +PVC_MANAGER_IMAGE_TAG=${IMAGE_ORG}/${PVC_MANAGER_IMAGE}:${TAG} # Specify the date of build DBUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') @@ -97,7 +100,7 @@ EXTERNAL_TOOLS=\ export DBUILD_ARGS=--build-arg DBUILD_DATE=${DBUILD_DATE} --build-arg DBUILD_REPO_URL=${DBUILD_REPO_URL} --build-arg DBUILD_SITE_URL=${DBUILD_SITE_URL} --build-arg BRANCH=${BRANCH} --build-arg RELEASE_TAG=${RELEASE_TAG} .PHONY: all -all: test provisioner-localpv-image +all: test provisioner-localpv-image pvc-manager-image .PHONY: deps deps: @@ -126,8 +129,10 @@ clean: rm -rf bin ./ci/ci-test.sh clean chmod -R u+w ${GOPATH}/bin/${PROVISIONER_LOCALPV} 2>/dev/null || true + chmod -R u+w ${GOPATH}/bin/${PVC_MANAGER} 2>/dev/null || true chmod -R u+w ${GOPATH}/pkg/* 2>/dev/null || true rm -rf ${GOPATH}/bin/${PROVISIONER_LOCALPV} + rm -rf ${GOPATH}/bin/${PVC_MANAGER} rm -rf ${GOPATH}/pkg/* .PHONY: test @@ -173,6 +178,14 @@ provisioner-localpv: @echo "----------------------------" @PNAME=${PROVISIONER_LOCALPV} CTLNAME=${PROVISIONER_LOCALPV} sh -c "'./buildscripts/build.sh'" +#Use this to build pvc-manager +.PHONY: pvc-manager +pvc-manager: + @echo "----------------------------" + @echo "--> 
pvc-manager " + @echo "----------------------------" + @PNAME=${PVC_MANAGER} CTLNAME=${PVC_MANAGER} sh -c "'./buildscripts/build.sh'" + .PHONY: provisioner-localpv-image provisioner-localpv-image: provisioner-localpv @echo "-------------------------------" @@ -182,6 +195,15 @@ provisioner-localpv-image: provisioner-localpv @cd buildscripts/provisioner-localpv && docker build -t ${PROVISIONER_LOCALPV_IMAGE_TAG} ${DBUILD_ARGS} . --no-cache @rm buildscripts/provisioner-localpv/${PROVISIONER_LOCALPV} +.PHONY: pvc-manager-image +pvc-manager-image: pvc-manager + @echo "-------------------------------" + @echo "--> pvc-manager image " + @echo "-------------------------------" + @cp bin/pvc-manager/${PVC_MANAGER} buildscripts/pvc-manager/ + @cd buildscripts/pvc-manager && docker build -t ${PVC_MANAGER_IMAGE_TAG} ${DBUILD_ARGS} . --no-cache + @rm buildscripts/pvc-manager/${PVC_MANAGER} + .PHONY: image-tag image-tag: @echo ${TAG} @@ -197,6 +219,7 @@ image-ref: .PHONY: push push: DIMAGE=${IMAGE_ORG}/${PROVISIONER_LOCALPV_IMAGE} ./buildscripts/push.sh + DIMAGE=${IMAGE_ORG}/${PVC_MANAGER_IMAGE} ./buildscripts/push.sh # include the buildx recipes include Makefile.buildx.mk diff --git a/Makefile.buildx.mk b/Makefile.buildx.mk index 1e0d8053..fc314d17 100644 --- a/Makefile.buildx.mk +++ b/Makefile.buildx.mk @@ -46,6 +46,15 @@ docker.buildx.provisioner-localpv: DOCKERX_IMAGE_NAME=$(PROVISIONER_LOCALPV_IMAG docker.buildx.provisioner-localpv: COMPONENT=$(PROVISIONER_LOCALPV) docker.buildx.provisioner-localpv: docker.buildx +.PHONY: docker.buildx.pvc-manager +docker.buildx.pvc-manager: DOCKERX_IMAGE_NAME=$(PVC_MANAGER_IMAGE_TAG) +docker.buildx.pvc-manager: COMPONENT=$(PVC_MANAGER) +docker.buildx.pvc-manager: docker.buildx + .PHONY: buildx.push.provisioner-localpv buildx.push.provisioner-localpv: BUILDX=true DIMAGE=${IMAGE_ORG}/provisioner-localpv ./buildscripts/push.sh + +.PHONY: buildx.push.pvc-manager +buildx.push.pvc-manager: + BUILDX=true DIMAGE=${IMAGE_ORG}/pvc-manager ./buildscripts/push.sh diff --git a/README.md b/README.md index ddf84c95..b3a212fc 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ OpenEBS Dynamic LocalPV Provisioner is an open‐source Kubernetes component tha ## Architecture +### Traditional Architecture (Helper Pods) + ```mermaid graph TD @@ -66,6 +68,51 @@ graph TD ``` +### New Architecture (PVC Manager DaemonSet) + +```mermaid + +graph TD + %% Define Styles with Black Text + style kubelet fill:#ffcc00,stroke:#d4a017,stroke-width:2px,color:#000 + style Provisioner fill:#66ccff,stroke:#3388cc,stroke-width:2px,color:#000 + style PVCManager fill:#99ff99,stroke:#44aa44,stroke-width:2px,color:#000 + style App fill:#ff9999,stroke:#cc6666,stroke-width:2px,color:#000 + style Hostpath fill:#ffdd99,stroke:#d4a017,stroke-width:2px,color:#000 + style PVC fill:#ffdd99,stroke:#d4a017,stroke-width:2px,color:#000 + style PV fill:#d9b3ff,stroke:#9955cc,stroke-width:2px,color:#000 + + subgraph "Kubernetes Cluster" + subgraph "Kubelet" + kubelet["Kubelet"] + end + subgraph "OpenEBS LocalPV" + Provisioner["LocalPV Provisioner"] + end + subgraph "Worker Node (Every Node)" + PVCManager["PVC Manager (DaemonSet)"] + App["Application Pod"] + PVC["Persistent Volume Claim"] + PV["Persistent Volume"] + Hostpath["[User-defined path on host]"] + end + end + + %% Storage Flow + App -->|Requests Storage| PVC + PVC -->|Binds to| PV + PV -->|Mounted on| Hostpath + kubelet -->|Mounts Path| Hostpath + + %% Provisioning Flow (New) + Provisioner -->|HTTP Request| PVCManager + Provisioner -->|Watches PVC Requests| 
Provisioner + Provisioner -->|Creates PV| PV + PVCManager -->|Creates/Deletes Directory| Hostpath + PV -->|Bound to| PVC + +``` + Please check [here](./design/hostpath_localpv_provisioner.md) for complete design and architecture. ## Kubernetes Compatibility Matrix @@ -90,6 +137,9 @@ Please check [here](./design/hostpath_localpv_provisioner.md) for complete desig ## Features +- [x] **Two Architecture Modes**: + - [x] **Helper Pod Mode** (Traditional): Creates ephemeral pods for volume operations + - [x] **PVC Manager Mode** (New): Uses DaemonSet with HTTP API for faster operations - [x] Access Modes - [x] ReadWriteOnce - ~~ReadOnlyMany~~ @@ -99,6 +149,25 @@ Please check [here](./design/hostpath_localpv_provisioner.md) for complete desig - [ ] `Block` mode - [x] [Volume Resize(Via Quotas)](./docs/tutorials/hostpath/xfs_quota/) +### PVC Manager Architecture Benefits + +The new PVC Manager architecture provides: + +- **Better Performance**: Eliminates pod creation overhead for volume operations +- **Reduced API Server Load**: Fewer ephemeral pod objects created/destroyed +- **Improved Reliability**: Persistent service always available for volume operations +- **Enhanced Observability**: HTTP API with health checks and metrics +- **Backward Compatibility**: Can be disabled to use traditional helper pod mode + +To enable PVC Manager mode, set the environment variable: +```yaml +env: +- name: OPENEBS_IO_ENABLE_PVC_MANAGER + value: "true" +``` + +[Learn more about PVC Manager architecture](./docs/pvc-manager-architecture.md) + ## Inspiration/Credit OpenEBS Local PV has been inspired by the prior work done by the following the Kubernetes projects: diff --git a/buildscripts/pvc-manager/Dockerfile b/buildscripts/pvc-manager/Dockerfile new file mode 100644 index 00000000..6b69de24 --- /dev/null +++ b/buildscripts/pvc-manager/Dockerfile @@ -0,0 +1,28 @@ +FROM alpine:3.20.1 + +RUN apk add --no-cache \ + iproute2 \ + bash \ + util-linux \ + e2fsprogs \ + xfsprogs \ + xfsprogs-extra \ + blkid \ + findmnt \ + ca-certificates \ + quota-tools + +COPY pvc-manager /usr/local/bin/ + +ARG DBUILD_DATE +ARG DBUILD_REPO_URL +ARG DBUILD_SITE_URL +LABEL org.label-schema.name="pvc-manager" +LABEL org.label-schema.description="OpenEBS LocalPV PVC Manager" +LABEL org.label-schema.url="$DBUILD_SITE_URL" +LABEL org.label-schema.vcs-url="$DBUILD_REPO_URL" +LABEL org.label-schema.schema-version="1.0" +LABEL org.label-schema.build-date="$DBUILD_DATE" + +ENTRYPOINT ["/usr/local/bin/pvc-manager"] +CMD ["--help"] \ No newline at end of file diff --git a/cmd/provisioner-localpv/app/env.go b/cmd/provisioner-localpv/app/env.go index a96d73e5..515f6044 100644 --- a/cmd/provisioner-localpv/app/env.go +++ b/cmd/provisioner-localpv/app/env.go @@ -31,11 +31,18 @@ const ( // ProvisionerImagePullSecrets is the environment variable that provides the // init pod to use as authentication when pulling helper image, it is used in the scene where authentication is required ProvisionerImagePullSecrets menv.ENVKey = "OPENEBS_IO_IMAGE_PULL_SECRETS" + + // ProvisionerEnablePVCManager enables the PVC Manager mode instead of helper pods + ProvisionerEnablePVCManager string = "OPENEBS_IO_ENABLE_PVC_MANAGER" + + // ProvisionerPVCManagerPort is the port where PVC Manager service listens + ProvisionerPVCManagerPort string = "OPENEBS_IO_PVC_MANAGER_PORT" ) var ( - defaultHelperImage = "openebs/linux-utils:latest" - defaultBasePath = "/var/openebs/local" + defaultHelperImage = "openebs/linux-utils:latest" + defaultBasePath = "/var/openebs/local" + 
defaultPVCManagerPort = "8080" ) func getOpenEBSNamespace() string { @@ -59,3 +66,19 @@ func getOpenEBSServiceAccountName() string { func getOpenEBSImagePullSecrets() string { return menv.Get(ProvisionerImagePullSecrets) } + +// GetEnv gets an environment variable value +func GetEnv(key string) string { + return menv.Get(menv.ENVKey(key)) +} + +// getPVCManagerEnabled returns whether PVC Manager mode is enabled +func getPVCManagerEnabled() bool { + val, _ := k8sEnv.GetBool(ProvisionerEnablePVCManager, true) // Default to true for new architecture + return val +} + +// getPVCManagerPort returns the port for PVC Manager service +func getPVCManagerPort() string { + return k8sEnv.GetString(ProvisionerPVCManagerPort, defaultPVCManagerPort) +} diff --git a/cmd/provisioner-localpv/app/helper_hostpath.go b/cmd/provisioner-localpv/app/helper_hostpath.go index af86b734..8c411f75 100644 --- a/cmd/provisioner-localpv/app/helper_hostpath.go +++ b/cmd/provisioner-localpv/app/helper_hostpath.go @@ -9,16 +9,16 @@ import ( "strings" "time" + "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/container" + "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/pod" + "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/volume" hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1" errors "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - - "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/container" - "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/pod" - "github.com/openebs/dynamic-localpv-provisioner/pkg/kubernetes/api/core/v1/volume" + "sigs.k8s.io/yaml" ) type podConfig struct { @@ -213,7 +213,21 @@ func (p *Provisioner) createCleanupPod(ctx context.Context, pOpts *HelperPodOpti config.taints = pOpts.selectedNodeTaints - config.pOpts.cmdsForPath = append(config.pOpts.cmdsForPath, filepath.Join("/data/", config.volumeDir)) + path := filepath.Join("/data/", config.volumeDir) + + scripts := "" + + "FS=`stat -f -c %T " + path + "` ; " + + "if [[ \"$FS\" == \"xfs\" ]]; then " + + "id=`xfs_io -c stat " + path + " 2>/dev/null | grep projid | head -1 | awk -F '=' '{print $2}' | tr -d ' '` ; " + + "echo \"projid=$id\" ; " + + "if [[ -n \"$id\" && \"$id\" != \"0\" ]]; then " + + "xfs_io -c 'chproj -R 0' " + path + " 2>/dev/null || true ; " + + "xfs_quota -x -c \"limit -p bsoft=0 bhard=0 $id\" /data 2>/dev/null || true ; " + + "fi ; " + + "fi ; " + + "rm -rf " + path + + config.pOpts.cmdsForPath = []string{"sh", "-c", scripts} _, err := p.launchPod(ctx, config) if err != nil && !k8serror.IsAlreadyExists(err) { @@ -349,6 +363,13 @@ func (p *Provisioner) launchPod(ctx context.Context, config podConfig) (*corev1. return nil, err } + helperPodYAML, err := yaml.Marshal(helperPod) + if err != nil { + klog.Errorf("failed to marshal helper pod to YAML: %v", err) + } else { + klog.Infof("launching helper pod: %s", string(helperPodYAML)) + } + var hPod *corev1.Pod //Launch the helper pod. 
diff --git a/cmd/provisioner-localpv/app/helper_pvc_manager.go b/cmd/provisioner-localpv/app/helper_pvc_manager.go new file mode 100644 index 00000000..c38b8ab6 --- /dev/null +++ b/cmd/provisioner-localpv/app/helper_pvc_manager.go @@ -0,0 +1,203 @@ +package app + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +// createInitVolumeViaManager creates a volume via the PVC Manager HTTP API instead of using helper pods +func (p *Provisioner) createInitVolumeViaManager(ctx context.Context, pOpts *HelperPodOptions) error { + klog.Infof("Creating init volume %s via PVC Manager", pOpts.name) + + // Get the Pod IP address from PVC Manager pod + podIP, err := p.getPVCManagerPodIPFromLabels(pOpts.nodeAffinityLabels) + if err != nil { + return fmt.Errorf("failed to get PVC Manager Pod IP: %v", err) + } + + // Create PVC Manager client using Pod IP address + pvcManagerURL := GetPVCManagerURL(podIP) + client := NewPVCManagerClient(pvcManagerURL) + + // Prepare the request + req := &PVCManagerRequest{ + Name: pOpts.name, + Path: pOpts.path, + NodeAffinityLabels: pOpts.nodeAffinityLabels, + FsMode: "0777", // Default file permissions + Commands: pOpts.cmdsForPath, + } + + // Send create volume request + if err := client.CreateVolume(ctx, req); err != nil { + return fmt.Errorf("failed to create volume via PVC Manager: %v", err) + } + + klog.Infof("Successfully created init volume %s via PVC Manager", pOpts.name) + return nil +} + +// createQuotaViaManager applies quota via the PVC Manager HTTP API instead of using helper pods +func (p *Provisioner) createQuotaViaManager(ctx context.Context, pOpts *HelperPodOptions) error { + klog.Infof("Applying quota for volume %s via PVC Manager", pOpts.name) + + // Get the Pod IP address from PVC Manager pod + podIP, err := p.getPVCManagerPodIPFromLabels(pOpts.nodeAffinityLabels) + if err != nil { + return fmt.Errorf("failed to get PVC Manager Pod IP: %v", err) + } + + // Create PVC Manager client using Pod IP address + pvcManagerURL := GetPVCManagerURL(podIP) + client := NewPVCManagerClient(pvcManagerURL) + + // Prepare the request + req := &PVCManagerRequest{ + Name: pOpts.name, + Path: pOpts.path, + NodeAffinityLabels: pOpts.nodeAffinityLabels, + SoftLimitGrace: pOpts.softLimitGrace, + HardLimitGrace: pOpts.hardLimitGrace, + PVCStorage: pOpts.pvcStorage, + } + + // Send apply quota request + if err := client.ApplyQuota(ctx, req); err != nil { + return fmt.Errorf("failed to apply quota via PVC Manager: %v", err) + } + + klog.Infof("Successfully applied quota for volume %s via PVC Manager", pOpts.name) + return nil +} + +// createCleanupViaManager deletes a volume via the PVC Manager HTTP API instead of using helper pods +func (p *Provisioner) createCleanupViaManager(ctx context.Context, pOpts *HelperPodOptions) error { + klog.Infof("Deleting volume %s via PVC Manager", pOpts.name) + + // Get the Pod IP address from PVC Manager pod + podIP, err := p.getPVCManagerPodIPFromLabels(pOpts.nodeAffinityLabels) + if err != nil { + return fmt.Errorf("failed to get PVC Manager Pod IP: %v", err) + } + + // Create PVC Manager client using Pod IP address + pvcManagerURL := GetPVCManagerURL(podIP) + client := NewPVCManagerClient(pvcManagerURL) + + // Prepare the request + req := &PVCManagerRequest{ + Name: pOpts.name, + Path: pOpts.path, + NodeAffinityLabels: pOpts.nodeAffinityLabels, + Commands: pOpts.cmdsForPath, + } + + // Send delete volume request + if err := client.DeleteVolume(ctx, req); err 
!= nil { + return fmt.Errorf("failed to delete volume via PVC Manager: %v", err) + } + + klog.Infof("Successfully deleted volume %s via PVC Manager", pOpts.name) + return nil +} + +// getPVCManagerPodIPFromLabels extracts the PVC Manager Pod IP from node affinity labels +func (p *Provisioner) getPVCManagerPodIPFromLabels(nodeAffinityLabels map[string]string) (string, error) { + // Check if kubeClient is initialized + if p.kubeClient == nil { + return "", fmt.Errorf("kubeClient is not initialized") + } + + // Get the node hostname from node affinity labels + nodeHostname, err := p.getNodeHostnameFromLabels(nodeAffinityLabels) + if err != nil { + return "", fmt.Errorf("failed to get node hostname: %v", err) + } + + // Find the PVC Manager pod running on this node + pods, err := p.kubeClient.CoreV1().Pods("openebs").List( + context.TODO(), + metav1.ListOptions{ + LabelSelector: "app=pvc-manager", + FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeHostname), + }, + ) + if err != nil { + return "", fmt.Errorf("failed to list PVC Manager pods: %v", err) + } + + if len(pods.Items) == 0 { + return "", fmt.Errorf("no PVC Manager pod found on node %s", nodeHostname) + } + + if len(pods.Items) > 1 { + return "", fmt.Errorf("multiple PVC Manager pods found on node %s", nodeHostname) + } + + pod := pods.Items[0] + if pod.Status.PodIP == "" { + return "", fmt.Errorf("PVC Manager pod on node %s has no Pod IP", nodeHostname) + } + + return pod.Status.PodIP, nil +} + +// getNodeIPFromLabels extracts the node IP address from node affinity labels +func (p *Provisioner) getNodeIPFromLabels(nodeAffinityLabels map[string]string) (string, error) { + // Get the node object first + nodeObject, err := p.GetNodeObjectFromLabels(nodeAffinityLabels) + if err != nil { + return "", fmt.Errorf("failed to get node object: %v", err) + } + + // Extract the internal IP address from the node + for _, address := range nodeObject.Status.Addresses { + if address.Type == corev1.NodeInternalIP { + return address.Address, nil + } + } + + // Fallback to external IP if internal IP is not available + for _, address := range nodeObject.Status.Addresses { + if address.Type == corev1.NodeExternalIP { + return address.Address, nil + } + } + + return "", fmt.Errorf("no IP address found for node") +} + +// getNodeHostnameFromLabels extracts the node hostname from node affinity labels +func (p *Provisioner) getNodeHostnameFromLabels(nodeAffinityLabels map[string]string) (string, error) { + // Try to get hostname from kubernetes.io/hostname label first + if hostname, exists := nodeAffinityLabels[k8sNodeLabelKeyHostname]; exists { + return hostname, nil + } + + // If not found, try to get the node object and extract hostname + nodeObject, err := p.GetNodeObjectFromLabels(nodeAffinityLabels) + if err != nil { + return "", fmt.Errorf("failed to get node object: %v", err) + } + + hostname := GetNodeHostname(nodeObject) + if hostname == "" { + return "", fmt.Errorf("node hostname is empty") + } + + return hostname, nil +} + +// isPVCManagerEnabled checks if PVC Manager mode is enabled via environment variable +func isPVCManagerEnabled() bool { + return getPVCManagerEnabled() +} + +// GetPVCManagerPort returns the port for PVC Manager service +func GetPVCManagerPort() string { + return getPVCManagerPort() +} diff --git a/cmd/provisioner-localpv/app/provisioner_hostpath.go b/cmd/provisioner-localpv/app/provisioner_hostpath.go index eec820b2..3819773b 100644 --- a/cmd/provisioner-localpv/app/provisioner_hostpath.go +++ 
b/cmd/provisioner-localpv/app/provisioner_hostpath.go @@ -2,6 +2,7 @@ package app import ( "context" + "strings" "github.com/openebs/maya/pkg/alertlog" mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" @@ -22,6 +23,21 @@ const ( HardLimitGrace string = "hardLimitGrace" ) +// isQuotaCommandMissingError checks if the error is related to missing quota commands +func isQuotaCommandMissingError(err error) bool { + if err == nil { + return false + } + + errStr := err.Error() + // Check for common patterns indicating missing quota commands + return strings.Contains(errStr, "xfs_quota: not found") || + strings.Contains(errStr, "command not found") || + strings.Contains(errStr, "quota commands not available") || + strings.Contains(errStr, "xfs_quota command not found") || + strings.Contains(errStr, "ext quota commands not found") +} + // ProvisionHostPath is invoked by the Provisioner which expect HostPath PV // // to be provisioned and a valid PV spec returned. @@ -79,7 +95,14 @@ func (p *Provisioner) ProvisionHostPath(ctx context.Context, opts pvController.P imagePullSecrets: imagePullSecrets, hostNetwork: hostNetwork, } - iErr := p.createInitPod(ctx, podOpts) + // Use PVC Manager if enabled, otherwise fallback to helper pods + var iErr error + if isPVCManagerEnabled() { + iErr = p.createInitVolumeViaManager(ctx, podOpts) + } else { + iErr = p.createInitPod(ctx, podOpts) + } + if iErr != nil { klog.Infof("Initialize volume %v failed: %v", name, iErr) alertlog.Logger.Errorw("", @@ -109,16 +132,34 @@ func (p *Provisioner) ProvisionHostPath(ctx context.Context, opts pvController.P pvcStorage: pvcStorage, hostNetwork: hostNetwork, } - iErr := p.createQuotaPod(ctx, podOpts) + // Use PVC Manager if enabled, otherwise fallback to helper pods + var iErr error + if isPVCManagerEnabled() { + iErr = p.createQuotaViaManager(ctx, podOpts) + } else { + iErr = p.createQuotaPod(ctx, podOpts) + } if iErr != nil { klog.Infof("Applying quota failed: %v", iErr) - alertlog.Logger.Errorw("", - "eventcode", "local.pv.provision.failure", - "msg", "Failed to provision Local PV", - "rname", opts.PVName, - "reason", "Quota enforcement failed", - "storagetype", stgType, - ) + // Check if the error is due to missing quota commands and provide a more informative message + if isQuotaCommandMissingError(iErr) { + klog.Infof("Quota commands not found. 
Please ensure xfsprogs (for XFS) or quota tools (for ext4) are installed on the PVC Manager container") + alertlog.Logger.Errorw("", + "eventcode", "local.pv.provision.failure", + "msg", "Failed to provision Local PV", + "rname", opts.PVName, + "reason", "Quota enforcement failed - missing quota tools in PVC Manager container", + "storagetype", stgType, + ) + } else { + alertlog.Logger.Errorw("", + "eventcode", "local.pv.provision.failure", + "msg", "Failed to provision Local PV", + "rname", opts.PVName, + "reason", "Quota enforcement failed", + "storagetype", stgType, + ) + } return nil, pvController.ProvisioningFinished, iErr } alertlog.Logger.Infow("", @@ -146,16 +187,34 @@ func (p *Provisioner) ProvisionHostPath(ctx context.Context, opts pvController.P pvcStorage: pvcStorage, hostNetwork: hostNetwork, } - iErr := p.createQuotaPod(ctx, podOpts) + // Use PVC Manager if enabled, otherwise fallback to helper pods + var iErr error + if isPVCManagerEnabled() { + iErr = p.createQuotaViaManager(ctx, podOpts) + } else { + iErr = p.createQuotaPod(ctx, podOpts) + } if iErr != nil { klog.Infof("Applying quota failed: %v", iErr) - alertlog.Logger.Errorw("", - "eventcode", "local.pv.provision.failure", - "msg", "Failed to provision Local PV", - "rname", opts.PVName, - "reason", "Quota enforcement failed", - "storagetype", stgType, - ) + // Check if the error is due to missing quota commands and provide a more informative message + if isQuotaCommandMissingError(iErr) { + klog.Infof("Quota commands not found. Please ensure xfsprogs (for XFS) or quota tools (for ext4) are installed on the PVC Manager container") + alertlog.Logger.Errorw("", + "eventcode", "local.pv.provision.failure", + "msg", "Failed to provision Local PV", + "rname", opts.PVName, + "reason", "Quota enforcement failed - missing quota tools in PVC Manager container", + "storagetype", stgType, + ) + } else { + alertlog.Logger.Errorw("", + "eventcode", "local.pv.provision.failure", + "msg", "Failed to provision Local PV", + "rname", opts.PVName, + "reason", "Quota enforcement failed", + "storagetype", stgType, + ) + } return nil, pvController.ProvisioningFinished, iErr } alertlog.Logger.Infow("", @@ -276,9 +335,9 @@ func (p *Provisioner) DeleteHostPath(ctx context.Context, pv *v1.PersistentVolum //Initiate clean up only when reclaim policy is not retain. 
klog.Infof("Deleting volume %v at %v:%v", pv.Name, GetNodeHostname(nodeObject), path) - cleanupCmdsForPath := []string{"rm", "-rf"} + podOpts := &HelperPodOptions{ - cmdsForPath: cleanupCmdsForPath, + cmdsForPath: []string{}, name: pv.Name, path: path, nodeAffinityLabels: nodeAffinityLabels, @@ -288,8 +347,16 @@ func (p *Provisioner) DeleteHostPath(ctx context.Context, pv *v1.PersistentVolum hostNetwork: hostNetwork, } - if err := p.createCleanupPod(ctx, podOpts); err != nil { - return errors.Wrapf(err, "clean up volume %v failed", pv.Name) + // Use PVC Manager if enabled, otherwise fallback to helper pods + var cleanupErr error + if isPVCManagerEnabled() { + cleanupErr = p.createCleanupViaManager(ctx, podOpts) + } else { + cleanupErr = p.createCleanupPod(ctx, podOpts) + } + + if cleanupErr != nil { + return errors.Wrapf(cleanupErr, "clean up volume %v failed", pv.Name) } return nil } diff --git a/cmd/provisioner-localpv/app/provisioner_test.go b/cmd/provisioner-localpv/app/provisioner_test.go index 7925cff1..e1942e2d 100644 --- a/cmd/provisioner-localpv/app/provisioner_test.go +++ b/cmd/provisioner-localpv/app/provisioner_test.go @@ -19,6 +19,7 @@ package app import ( "context" "errors" + "os" "testing" v1 "k8s.io/api/core/v1" @@ -27,6 +28,19 @@ import ( ) func TestProvision(t *testing.T) { + // Disable PVC Manager mode for this test to use helper pods instead + originalPVCManagerEnabled := getPVCManagerEnabled() + // Set PVC Manager to false for this test + os.Setenv("OPENEBS_IO_ENABLE_PVC_MANAGER", "false") + defer func() { + // Restore original value + if originalPVCManagerEnabled { + os.Setenv("OPENEBS_IO_ENABLE_PVC_MANAGER", "true") + } else { + os.Unsetenv("OPENEBS_IO_ENABLE_PVC_MANAGER") + } + }() + testsCases := map[string]struct { opts pvController.ProvisionOptions errorMessage string diff --git a/cmd/provisioner-localpv/app/pvc_manager_client.go b/cmd/provisioner-localpv/app/pvc_manager_client.go new file mode 100644 index 00000000..0af0d191 --- /dev/null +++ b/cmd/provisioner-localpv/app/pvc_manager_client.go @@ -0,0 +1,157 @@ +package app + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "k8s.io/klog/v2" +) + +// PVCManagerClient represents the HTTP client for communicating with PVC Manager +type PVCManagerClient struct { + client *http.Client + baseURL string +} + +// PVCManagerRequest represents a request to the PVC Manager +type PVCManagerRequest struct { + Name string `json:"name"` + Path string `json:"path"` + NodeAffinityLabels map[string]string `json:"nodeAffinityLabels"` + FsMode string `json:"fsMode,omitempty"` + Commands []string `json:"commands"` + SoftLimitGrace string `json:"softLimitGrace,omitempty"` + HardLimitGrace string `json:"hardLimitGrace,omitempty"` + PVCStorage int64 `json:"pvcStorage,omitempty"` +} + +// PVCManagerResponse represents a response from the PVC Manager +type PVCManagerResponse struct { + Success bool `json:"success"` + Message string `json:"message,omitempty"` + Error string `json:"error,omitempty"` +} + +// NewPVCManagerClient creates a new PVC Manager client +func NewPVCManagerClient(baseURL string) *PVCManagerClient { + return &PVCManagerClient{ + client: &http.Client{ + Timeout: 60 * time.Second, // Increased timeout for volume operations + }, + baseURL: baseURL, + } +} + +// CreateVolume sends a request to create a volume via PVC Manager +func (c *PVCManagerClient) CreateVolume(ctx context.Context, req *PVCManagerRequest) error { + url := fmt.Sprintf("%s/api/v1/volumes/create", c.baseURL) + 
return c.sendRequest(ctx, "POST", url, req) +} + +// DeleteVolume sends a request to delete a volume via PVC Manager +func (c *PVCManagerClient) DeleteVolume(ctx context.Context, req *PVCManagerRequest) error { + url := fmt.Sprintf("%s/api/v1/volumes/delete", c.baseURL) + return c.sendRequest(ctx, "POST", url, req) +} + +// ApplyQuota sends a request to apply quota via PVC Manager +func (c *PVCManagerClient) ApplyQuota(ctx context.Context, req *PVCManagerRequest) error { + url := fmt.Sprintf("%s/api/v1/volumes/quota", c.baseURL) + return c.sendRequest(ctx, "POST", url, req) +} + +// HealthCheck performs a health check on the PVC Manager +func (c *PVCManagerClient) HealthCheck(ctx context.Context) error { + url := fmt.Sprintf("%s/api/v1/health", c.baseURL) + + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return fmt.Errorf("failed to create health check request: %v", err) + } + + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("health check failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("health check failed with status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + +// sendRequest sends an HTTP request to the PVC Manager +func (c *PVCManagerClient) sendRequest(ctx context.Context, method, url string, reqData *PVCManagerRequest) error { + // Marshal request data to JSON + jsonData, err := json.Marshal(reqData) + if err != nil { + return fmt.Errorf("failed to marshal request: %v", err) + } + + // Create HTTP request + req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonData)) + if err != nil { + return fmt.Errorf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", "application/json") + + // Send request + klog.V(4).Infof("Sending %s request to %s", method, url) + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("request failed: %v", err) + } + defer resp.Body.Close() + + // Read response + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read response: %v", err) + } + + // Check status code + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + var response PVCManagerResponse + if err := json.Unmarshal(body, &response); err != nil { + klog.Warningf("Failed to parse response JSON: %v", err) + return nil // Request succeeded even if we can't parse the response + } + + if !response.Success { + return fmt.Errorf("PVC Manager operation failed: %s", response.Error) + } + + klog.V(4).Infof("PVC Manager operation succeeded: %s", response.Message) + return nil + } + + // Handle error response + var errorResponse struct { + Error string `json:"error"` + Message string `json:"message"` + } + + if err := json.Unmarshal(body, &errorResponse); err == nil { + return fmt.Errorf("PVC Manager error (status %d): %s - %s", resp.StatusCode, errorResponse.Error, errorResponse.Message) + } + + return fmt.Errorf("PVC Manager request failed with status %d: %s", resp.StatusCode, string(body)) +} + +// GetPVCManagerURL constructs the PVC Manager URL for a given Pod IP +func GetPVCManagerURL(podIPOrNodeIP string) string { + // The PVC Manager will be running as a DaemonSet on each node + // We can access it via the pod's IP address or node IP address on the configured port + // Using Pod IP address is preferred for direct pod communication + port := GetPVCManagerPort() + return fmt.Sprintf("http://%s:%s", podIPOrNodeIP, port) +} diff --git 
a/cmd/pvc-manager/app/server.go b/cmd/pvc-manager/app/server.go new file mode 100644 index 00000000..db9abfd4 --- /dev/null +++ b/cmd/pvc-manager/app/server.go @@ -0,0 +1,226 @@ +package app + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/gorilla/mux" + "k8s.io/klog/v2" +) + +const ( + // API version + APIVersion = "v1" + + // Server version + Version = "1.0.0" +) + +// Server represents the PVC Manager HTTP server +type Server struct { + listenAddr string + server *http.Server + volumeManager *VolumeManager + router *mux.Router +} + +// NewServer creates a new PVC Manager server +func NewServer(listenAddr string) *Server { + s := &Server{ + listenAddr: listenAddr, + volumeManager: NewVolumeManager(), + } + + s.setupRoutes() + + return s +} + +// setupRoutes configures the HTTP routes +func (s *Server) setupRoutes() { + s.router = mux.NewRouter() + + // API v1 routes + api := s.router.PathPrefix(fmt.Sprintf("/api/%s", APIVersion)).Subrouter() + + // Health check + api.HandleFunc("/health", s.healthHandler).Methods("GET") + + // Volume operations + api.HandleFunc("/volumes/create", s.createVolumeHandler).Methods("POST") + api.HandleFunc("/volumes/delete", s.deleteVolumeHandler).Methods("POST") + api.HandleFunc("/volumes/quota", s.applyQuotaHandler).Methods("POST") + + // Add middleware + s.router.Use(s.loggingMiddleware) + s.router.Use(s.corsMiddleware) +} + +// Start starts the HTTP server +func (s *Server) Start() error { + s.server = &http.Server{ + Addr: s.listenAddr, + Handler: s.router, + ReadTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + } + + klog.Infof("PVC Manager server listening on %s", s.listenAddr) + return s.server.ListenAndServe() +} + +// Stop gracefully shuts down the HTTP server +func (s *Server) Stop() error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + return s.server.Shutdown(ctx) +} + +// healthHandler handles health check requests +func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) { + response := HealthResponse{ + Status: "healthy", + Timestamp: time.Now().Format(time.RFC3339), + Version: Version, + } + + WriteJSONResponse(w, http.StatusOK, response) +} + +// createVolumeHandler handles volume creation requests +func (s *Server) createVolumeHandler(w http.ResponseWriter, r *http.Request) { + var req VolumeRequest + if err := ParseJSONRequest(r, &req); err != nil { + WriteError(w, http.StatusBadRequest, err.Error()) + return + } + + if err := s.validateVolumeRequest(&req); err != nil { + WriteError(w, http.StatusBadRequest, err.Error()) + return + } + + if err := s.volumeManager.CreateVolume(r.Context(), &req); err != nil { + klog.Errorf("Failed to create volume %s: %v", req.Name, err) + WriteError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create volume: %v", err)) + return + } + + response := VolumeResponse{ + Success: true, + Message: fmt.Sprintf("Volume %s created successfully", req.Name), + } + + WriteJSONResponse(w, http.StatusOK, response) +} + +// deleteVolumeHandler handles volume deletion requests +func (s *Server) deleteVolumeHandler(w http.ResponseWriter, r *http.Request) { + var req VolumeRequest + if err := ParseJSONRequest(r, &req); err != nil { + WriteError(w, http.StatusBadRequest, err.Error()) + return + } + + if req.Name == "" || req.Path == "" { + WriteError(w, http.StatusBadRequest, "name and path are required for volume deletion") + return + } + + if err := 
s.volumeManager.DeleteVolume(r.Context(), &req); err != nil { + klog.Errorf("Failed to delete volume %s: %v", req.Name, err) + WriteError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to delete volume: %v", err)) + return + } + + response := VolumeResponse{ + Success: true, + Message: fmt.Sprintf("Volume %s deleted successfully", req.Name), + } + + WriteJSONResponse(w, http.StatusOK, response) +} + +// applyQuotaHandler handles quota application requests +func (s *Server) applyQuotaHandler(w http.ResponseWriter, r *http.Request) { + var req VolumeRequest + if err := ParseJSONRequest(r, &req); err != nil { + WriteError(w, http.StatusBadRequest, err.Error()) + return + } + + if err := s.validateQuotaRequest(&req); err != nil { + WriteError(w, http.StatusBadRequest, err.Error()) + return + } + + if err := s.volumeManager.ApplyQuota(r.Context(), &req); err != nil { + klog.Errorf("Failed to apply quota for volume %s: %v", req.Name, err) + WriteError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to apply quota: %v", err)) + return + } + + response := VolumeResponse{ + Success: true, + Message: fmt.Sprintf("Quota applied successfully for volume %s", req.Name), + } + + WriteJSONResponse(w, http.StatusOK, response) +} + +// validateVolumeRequest validates a volume request +func (s *Server) validateVolumeRequest(req *VolumeRequest) error { + if req.Name == "" { + return fmt.Errorf("name is required") + } + if req.Path == "" { + return fmt.Errorf("path is required") + } + if len(req.Commands) == 0 { + return fmt.Errorf("commands are required") + } + return nil +} + +// validateQuotaRequest validates a quota request +func (s *Server) validateQuotaRequest(req *VolumeRequest) error { + if req.Name == "" { + return fmt.Errorf("name is required") + } + if req.Path == "" { + return fmt.Errorf("path is required") + } + if req.PVCStorage == 0 { + return fmt.Errorf("pvcStorage is required for quota operations") + } + return nil +} + +// loggingMiddleware logs HTTP requests +func (s *Server) loggingMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + next.ServeHTTP(w, r) + klog.Infof("%s %s %s", r.Method, r.URL.Path, time.Since(start)) + }) +} + +// corsMiddleware adds CORS headers +func (s *Server) corsMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type") + + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/cmd/pvc-manager/app/types.go b/cmd/pvc-manager/app/types.go new file mode 100644 index 00000000..d96bcf58 --- /dev/null +++ b/cmd/pvc-manager/app/types.go @@ -0,0 +1,70 @@ +package app + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// VolumeRequest represents a request for volume operations +type VolumeRequest struct { + Name string `json:"name"` + Path string `json:"path"` + NodeAffinityLabels map[string]string `json:"nodeAffinityLabels"` + FsMode string `json:"fsMode,omitempty"` + Commands []string `json:"commands"` + SoftLimitGrace string `json:"softLimitGrace,omitempty"` + HardLimitGrace string `json:"hardLimitGrace,omitempty"` + PVCStorage int64 `json:"pvcStorage,omitempty"` +} + +// VolumeResponse represents a response for volume operations +type VolumeResponse struct { 
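+	// Success reports whether the operation completed; Message or Error carries the human-readable outcome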
+ Success bool `json:"success"` + Message string `json:"message,omitempty"` + Error string `json:"error,omitempty"` +} + +// HealthResponse represents a health check response +type HealthResponse struct { + Status string `json:"status"` + Timestamp string `json:"timestamp"` + Version string `json:"version"` +} + +// APIError represents an API error response +type APIError struct { + Error string `json:"error"` + Code int `json:"code"` + Message string `json:"message"` +} + +// WriteJSONResponse writes a JSON response to the http.ResponseWriter +func WriteJSONResponse(w http.ResponseWriter, statusCode int, data interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + json.NewEncoder(w).Encode(data) +} + +// WriteError writes an error response to the http.ResponseWriter +func WriteError(w http.ResponseWriter, statusCode int, message string) { + WriteJSONResponse(w, statusCode, APIError{ + Error: http.StatusText(statusCode), + Code: statusCode, + Message: message, + }) +} + +// ParseJSONRequest parses a JSON request body into the provided interface +func ParseJSONRequest(r *http.Request, v interface{}) error { + if r.Body == nil { + return fmt.Errorf("request body is empty") + } + defer r.Body.Close() + + if err := json.NewDecoder(r.Body).Decode(v); err != nil { + return fmt.Errorf("failed to parse JSON request: %v", err) + } + + return nil +} diff --git a/cmd/pvc-manager/app/volume_manager.go b/cmd/pvc-manager/app/volume_manager.go new file mode 100644 index 00000000..049e3a65 --- /dev/null +++ b/cmd/pvc-manager/app/volume_manager.go @@ -0,0 +1,261 @@ +package app + +import ( + "context" + "fmt" + "math" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + hostpath "github.com/openebs/maya/pkg/hostpath/v1alpha1" + "k8s.io/klog/v2" +) + +// VolumeManager handles volume operations on the local node +type VolumeManager struct { + // Add any necessary fields for volume management +} + +// NewVolumeManager creates a new VolumeManager instance +func NewVolumeManager() *VolumeManager { + return &VolumeManager{} +} + +// CreateVolume creates a new volume directory on the local node +func (vm *VolumeManager) CreateVolume(ctx context.Context, req *VolumeRequest) error { + klog.Infof("Creating volume %s at path %s", req.Name, req.Path) + + // Extract the base path and the volume unique path + parentDir, volumeDir, err := vm.extractPaths(req.Path) + if err != nil { + return fmt.Errorf("failed to extract paths: %v", err) + } + + // Set default file permissions if not specified + fsMode := req.FsMode + if fsMode == "" { + fsMode = "0777" + } + + // Create the directory with specified permissions + fullPath := filepath.Join(parentDir, volumeDir) + if err := vm.executeCommand(ctx, "mkdir", "-m", fsMode, "-p", fullPath); err != nil { + return fmt.Errorf("failed to create directory: %v", err) + } + + klog.Infof("Successfully created volume %s at path %s", req.Name, fullPath) + return nil +} + +// DeleteVolume removes a volume directory from the local node +func (vm *VolumeManager) DeleteVolume(ctx context.Context, req *VolumeRequest) error { + klog.Infof("Deleting volume %s at path %s", req.Name, req.Path) + + // Extract the base path and the volume unique path + parentDir, volumeDir, err := vm.extractPaths(req.Path) + if err != nil { + return fmt.Errorf("failed to extract paths: %v", err) + } + + // Remove the directory + fullPath := filepath.Join(parentDir, volumeDir) + + // check if path is xfs quota enabled and remove quota 
projid + cleanupCmdsForPath := fmt.Sprintf(` + d="%s" + base="%s" + # check fs type first + fs=$(stat -f -c %%T $base 2>/dev/null) + if [ "$fs" = "xfs" ]; then + id=$(xfs_io -c stat $d 2>/dev/null | awk '/projid/{print $3}' | head -1) + echo "projid=$id" + if [ -n "$id" ] && [ "$id" != "0" ]; then + # remove projid binding + xfs_io -c "chproj -R 0" "$d" 2>/dev/null || true + # remove quota limit + xfs_quota -x -c "limit -p bsoft=0 bhard=0 $id" $base 2>/dev/null || true + fi + fi + rm -rf $d + `, fullPath, parentDir) + + if err := vm.executeCommand(ctx, "sh", "-c", cleanupCmdsForPath); err != nil { + return fmt.Errorf("failed to delete directory: %v", err) + } + + klog.Infof("Successfully deleted volume %s at path %s", req.Name, fullPath) + return nil +} + +// ApplyQuota applies filesystem quota to a volume +func (vm *VolumeManager) ApplyQuota(ctx context.Context, req *VolumeRequest) error { + klog.Infof("Applying quota for volume %s at path %s", req.Name, req.Path) + + // Extract the base path and the volume unique path + parentDir, volumeDir, err := vm.extractPaths(req.Path) + if err != nil { + return fmt.Errorf("failed to extract paths: %v", err) + } + + // Convert limits to kilobytes + softLimitGrace, err := vm.convertToK(req.SoftLimitGrace, req.PVCStorage) + if err != nil { + return fmt.Errorf("failed to convert soft limit: %v", err) + } + + hardLimitGrace, err := vm.convertToK(req.HardLimitGrace, req.PVCStorage) + if err != nil { + return fmt.Errorf("failed to convert hard limit: %v", err) + } + + // Validate limits + if err := vm.validateLimits(softLimitGrace, hardLimitGrace, req.PVCStorage); err != nil { + return fmt.Errorf("invalid limits: %v", err) + } + + // Apply quota based on filesystem type + if err := vm.applyQuotaByFilesystem(ctx, parentDir, volumeDir, softLimitGrace, hardLimitGrace); err != nil { + return fmt.Errorf("failed to apply quota: %v", err) + } + + klog.Infof("Successfully applied quota for volume %s", req.Name) + return nil +} + +// extractPaths extracts parent directory and volume directory from the full path +func (vm *VolumeManager) extractPaths(fullPath string) (parentDir, volumeDir string, err error) { + // Use hostpath builder to validate and extract paths + return hostpath.NewBuilder().WithPath(fullPath). + WithCheckf(hostpath.IsNonRoot(), "volume directory {%v} should not be under root directory", fullPath). + ExtractSubPath() +} + +// executeCommand executes a system command with timeout +func (vm *VolumeManager) executeCommand(ctx context.Context, name string, args ...string) error { + // Create command context with timeout + cmdCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + cmd := exec.CommandContext(cmdCtx, name, args...) 
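+	// the derived context kills the command if it runs past the 30-second deadline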
+ + // Run the command + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if exitError, ok := err.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + // Check for specific error conditions + switch status.ExitStatus() { + case 127: // Command not found + return fmt.Errorf("command not found: %s - please ensure required quota tools are installed", name) + case 1: // General error + if strings.Contains(outputStr, "Unsupported filesystem type") { + return fmt.Errorf("unsupported filesystem type - please ensure the filesystem supports quotas and is properly mounted with quota options") + } + return fmt.Errorf("command failed with exit code %d: %s", status.ExitStatus(), outputStr) + default: + return fmt.Errorf("command failed with exit code %d: %s", status.ExitStatus(), outputStr) + } + } + } + return fmt.Errorf("command failed: %v, output: %s", err, outputStr) + } + + return nil +} + +// convertToK converts the limits to kilobytes +func (vm *VolumeManager) convertToK(limit string, pvcStorage int64) (string, error) { + if len(limit) == 0 { + return "0k", nil + } + + valueRegex := regexp.MustCompile(`[\d]*[\.]?[\d]*`) + valueString := valueRegex.FindString(limit) + + if limit != valueString+"%" { + return "", fmt.Errorf("invalid format for limit grace") + } + + value, err := strconv.ParseFloat(valueString, 64) + if err != nil { + return "", fmt.Errorf("invalid format, cannot parse") + } + + if value > 100 { + value = 100 + } + + value *= float64(pvcStorage) + value /= 100 + value += float64(pvcStorage) + value /= 1024 + + value = math.Ceil(value) + valueString = strconv.FormatFloat(value, 'f', -1, 64) + valueString += "k" + return valueString, nil +} + +// validateLimits validates quota limits +func (vm *VolumeManager) validateLimits(softLimitGrace, hardLimitGrace string, pvcStorage int64) error { + if softLimitGrace == "0k" && hardLimitGrace == "0k" { + // Use PVC storage as both limits + pvcStorageInK := math.Ceil(float64(pvcStorage) / 1024) + pvcStorageInKString := strconv.FormatFloat(pvcStorageInK, 'f', -1, 64) + "k" + softLimitGrace = pvcStorageInKString + hardLimitGrace = pvcStorageInKString + return nil + } + + if softLimitGrace == "0k" || hardLimitGrace == "0k" { + return nil + } + + if len(softLimitGrace) > len(hardLimitGrace) || + (len(softLimitGrace) == len(hardLimitGrace) && softLimitGrace > hardLimitGrace) { + return fmt.Errorf("hard limit cannot be smaller than soft limit") + } + + return nil +} + +// applyQuotaByFilesystem applies quota based on the filesystem type +func (vm *VolumeManager) applyQuotaByFilesystem(ctx context.Context, parentDir, volumeDir, softLimitGrace, hardLimitGrace string) error { + // Create a shell script to detect filesystem and apply quota + script := fmt.Sprintf(` + FS=$(stat -f -c %%T %s) + if [[ "$FS" == "xfs" ]]; then + PID=$(xfs_quota -x -c 'report -h' %s | tail -2 | awk 'NR==1{print substr ($1,2)}+0') + PID=$((PID + 1)) + xfs_quota -x -c "project -s -p %s $PID" %s + xfs_quota -x -c "limit -p bsoft=%s bhard=%s $PID" %s + elif [[ "$FS" == "ext2/ext3" ]]; then + PID=$(repquota -P %s | tail -3 | awk 'NR==1{print substr ($1,2)}+0') + PID=$((PID + 1)) + chattr +P -p $PID %s + setquota -P $PID %s %s 0 0 %s + else + echo "Unsupported filesystem type: $FS" + exit 1 + fi`, + parentDir, // stat filesystem + parentDir, // xfs_quota report + filepath.Join(parentDir, volumeDir), // project path + parentDir, // project base + softLimitGrace, hardLimitGrace, // xfs limits + parentDir, // xfs 
base + parentDir, // repquota + filepath.Join(parentDir, volumeDir), // chattr path + strings.ToUpper(softLimitGrace), strings.ToUpper(hardLimitGrace), // ext quota limits + parentDir, // setquota base + ) + + // Execute the quota script + return vm.executeCommand(ctx, "sh", "-c", script) +} diff --git a/cmd/pvc-manager/main.go b/cmd/pvc-manager/main.go new file mode 100644 index 00000000..6bf4b90a --- /dev/null +++ b/cmd/pvc-manager/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "flag" + "os" + "os/signal" + "syscall" + + "k8s.io/klog/v2" + + "github.com/openebs/dynamic-localpv-provisioner/cmd/pvc-manager/app" +) + +func main() { + var ( + listenAddr = flag.String("listen-addr", ":8080", "The address to listen on for HTTP requests") + logLevel = flag.String("log-level", "info", "Log level (debug, info, warn, error)") + ) + flag.Parse() + + // Initialize logger - klog doesn't have SetLevel, so we just note the desired level + klog.Infof("Starting PVC Manager service with log level: %s", *logLevel) + + klog.Info("Starting PVC Manager service...") + + // Create and start the PVC manager server + server := app.NewServer(*listenAddr) + + // Start server in a goroutine + go func() { + if err := server.Start(); err != nil { + klog.Fatalf("Failed to start PVC Manager server: %v", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + <-quit + + klog.Info("Shutting down PVC Manager service...") + if err := server.Stop(); err != nil { + klog.Errorf("Error during server shutdown: %v", err) + } + klog.Info("PVC Manager service stopped") +} diff --git a/deploy/helm/charts/PVC-MANAGER-INTEGRATION.md b/deploy/helm/charts/PVC-MANAGER-INTEGRATION.md new file mode 100644 index 00000000..5cd254b6 --- /dev/null +++ b/deploy/helm/charts/PVC-MANAGER-INTEGRATION.md @@ -0,0 +1,133 @@ +# PVC Manager Helm Chart Integration - Summary + +This document summarizes the changes made to integrate PVC Manager into the LocalPV provisioner Helm chart. + +## Files Added/Modified + +### 1. Values Configuration (`values.yaml`) +Added a comprehensive `pvcManager` section with the following configuration options: + +```yaml +pvcManager: + enabled: false # Enable/disable PVC Manager (default: false) + image: + registry: "" + repository: openebs/pvc-manager + tag: 1.0.0 + pullPolicy: IfNotPresent + port: 8080 # HTTP server port + logLevel: info # Log level + listenAddr: "0.0.0.0:8080" # Listen address + # ... additional configuration options +``` + +### 2. PVC Manager DaemonSet Template (`templates/pvc-manager-daemonset.yaml`) +- Created new template for PVC Manager DaemonSet +- Includes proper health checks (liveness and readiness probes) +- Configurable resource limits and requests +- Host network and privileged access for volume operations +- Volume mounts for `/var/openebs/local`, `/dev`, `/proc`, `/sys` +- Tolerations and node selectors + +### 3. PVC Manager Service Template (`templates/pvc-manager-service.yaml`) +- Created ClusterIP service for PVC Manager +- Exposes port 8080 for HTTP communication +- Optional service creation based on configuration + +### 4. RBAC Configuration (`templates/rbac.yaml`) +Enhanced existing RBAC template to include PVC Manager resources: +- ServiceAccount: `openebs-pvc-manager` +- ClusterRole with permissions for nodes, PVs, PVCs, and events +- ClusterRoleBinding to link ServiceAccount and ClusterRole + +### 5. 
Provisioner Integration (`templates/deployment.yaml`) +Added environment variables to the provisioner deployment: +- `OPENEBS_IO_ENABLE_PVC_MANAGER`: Controls PVC Manager usage +- `OPENEBS_IO_PVC_MANAGER_PORT`: Specifies PVC Manager port + +### 6. Documentation Updates (`README.md`) +Added comprehensive documentation for all PVC Manager configuration parameters. + +### 7. Example Configuration (`examples/pvc-manager-example.md`) +Created example showing how to use PVC Manager with different configuration options. + +## Key Features + +### 1. Conditional Deployment +- PVC Manager resources are only created when `pvcManager.enabled=true` +- Backward compatibility maintained when disabled + +### 2. Flexible Configuration +- All aspects of PVC Manager deployment are configurable +- Resource limits, tolerations, node selectors, etc. +- Image registry/repository/tag customization + +### 3. Proper Integration +- Environment variables automatically set in provisioner +- RBAC permissions properly scoped +- Service discovery via ClusterIP service + +### 4. Production Ready +- Health checks configured +- Resource limits set +- Security contexts defined +- Tolerations for node scheduling + +## Usage Examples + +### Basic Installation with PVC Manager +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + --set pvcManager.enabled=true +``` + +### Advanced Configuration +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + --set pvcManager.enabled=true \ + --set pvcManager.logLevel=debug \ + --set pvcManager.resources.requests.cpu=150m +``` + +### With Custom Values File +```yaml +# values.yaml +pvcManager: + enabled: true + logLevel: info + resources: + limits: + cpu: 200m + memory: 128Mi +``` + +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + -f values.yaml +``` + +## Verification + +The implementation has been tested with: +- `helm lint` - Passes successfully +- `helm template` - Renders correctly +- Both enabled and disabled states work properly +- Environment variables set correctly based on configuration + +## Architecture Benefits + +When PVC Manager is enabled: +- **Performance**: Eliminates helper pod creation overhead +- **Efficiency**: Persistent DaemonSet vs ephemeral pods +- **Scalability**: Better resource utilization +- **Monitoring**: Centralized volume operation management + +When PVC Manager is disabled: +- **Compatibility**: Falls back to traditional helper pod approach +- **Migration**: Smooth transition path for existing deployments \ No newline at end of file diff --git a/deploy/helm/charts/README.md b/deploy/helm/charts/README.md index abdd0b9e..e4bc5fb4 100644 --- a/deploy/helm/charts/README.md +++ b/deploy/helm/charts/README.md @@ -104,6 +104,30 @@ helm install openebs-localpv openebs-localpv/localpv-provisioner --namespace ope | `localpv.enableLeaderElection` | Enable leader election | `true` | | `localpv.affinity` | LocalPV Provisioner pod affinity | `{}` | | `localpv.priorityClassName` | Sets priorityClassName in pod | `""` | +| `pvcManager.enabled` | Enable PVC Manager DaemonSet deployment | `false` | +| `pvcManager.image.registry` | Registry for PVC Manager image | `""` | +| `pvcManager.image.repository` | Image repository for PVC Manager | `openebs/pvc-manager` | +| `pvcManager.image.tag` | Image tag for PVC Manager | `1.0.0` | +| 
`pvcManager.image.pullPolicy` | Image pull policy for PVC Manager | `IfNotPresent` |
+| `pvcManager.port` | Port on which the PVC Manager HTTP server listens | `8080` |
+| `pvcManager.logLevel` | Log level for PVC Manager | `info` |
+| `pvcManager.listenAddr` | Listen address for PVC Manager HTTP server | `"0.0.0.0:8080"` |
+| `pvcManager.hostNetwork` | Use host network for PVC Manager pods | `false` |
+| `pvcManager.hostPID` | Use host PID namespace for PVC Manager pods | `false` |
+| `pvcManager.dnsPolicy` | DNS policy for PVC Manager pods | `ClusterFirst` |
+| `pvcManager.basePath` | Base path on the host for volume operations | `"/var/openebs/local"` |
+| `pvcManager.resources.limits.cpu` | CPU limit for PVC Manager containers | `200m` |
+| `pvcManager.resources.limits.memory` | Memory limit for PVC Manager containers | `128Mi` |
+| `pvcManager.resources.requests.cpu` | CPU request for PVC Manager containers | `100m` |
+| `pvcManager.resources.requests.memory` | Memory request for PVC Manager containers | `64Mi` |
+| `pvcManager.nodeSelector` | Node selector for PVC Manager DaemonSet | `{"kubernetes.io/os": "linux"}` |
+| `pvcManager.tolerations` | Tolerations for PVC Manager DaemonSet | `[NoSchedule, NoExecute]` |
+| `pvcManager.securityContext.privileged` | Run PVC Manager containers with privileged security context | `true` |
+| `pvcManager.service.enabled` | Enable Service for PVC Manager | `true` |
+| `pvcManager.service.type` | Service type for PVC Manager | `ClusterIP` |
+| `pvcManager.service.port` | Service port for PVC Manager | `8080` |
+| `pvcManager.rbac.create` | Create RBAC resources for PVC Manager | `true` |
+| `pvcManager.rbac.serviceAccountName` | Service account name for PVC Manager | `openebs-pvc-manager` |
 | `rbac.create` | Enable RBAC Resources | `true` |
 | `rbac.pspEnabled` | Create pod security policy resources | `false` |
diff --git a/deploy/helm/charts/examples/pvc-manager-example.md b/deploy/helm/charts/examples/pvc-manager-example.md
new file mode 100644
index 00000000..f48109bf
--- /dev/null
+++ b/deploy/helm/charts/examples/pvc-manager-example.md
@@ -0,0 +1,96 @@
+# Example: Enabling PVC Manager
+
+This example shows how to install the LocalPV provisioner with PVC Manager enabled.
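+
+When `pvcManager.enabled=true`, the chart deploys a `pvc-manager` DaemonSet and, by default, a ClusterIP Service on port 8080; the provisioner then performs volume operations over HTTP instead of launching helper pods. Once the chart has been installed with any of the commands below, a quick way to confirm that the HTTP API is reachable is to port-forward the Service and query its health endpoint. This is a minimal sketch: it assumes the `openebs` namespace, looks the Service up by its `app=pvc-manager` label (the rendered Service name depends on the Helm release name), and uses an arbitrary local port (`18080`).
+
+```bash
+# Look up the PVC Manager Service created by the chart
+SVC=$(kubectl get svc -n openebs -l app=pvc-manager -o jsonpath='{.items[0].metadata.name}')
+
+# Forward a local port to the Service port (8080), then query the health endpoint
+kubectl port-forward -n openebs "svc/${SVC}" 18080:8080 &
+sleep 2
+curl http://localhost:18080/api/v1/health
+```
+
+The same `/api/v1/health` path is what the DaemonSet's liveness and readiness probes use.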
+ +## Installation with PVC Manager + +To install the LocalPV provisioner with PVC Manager enabled: + +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + --set pvcManager.enabled=true +``` + +## Advanced Configuration + +For advanced PVC Manager configuration: + +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + --set pvcManager.enabled=true \ + --set pvcManager.logLevel=debug \ + --set pvcManager.resources.requests.cpu=150m \ + --set pvcManager.resources.requests.memory=96Mi +``` + +## Custom Values File + +Create a `pvc-manager-values.yaml` file: + +```yaml +pvcManager: + enabled: true + logLevel: info + port: 8080 + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 100m + memory: 64Mi + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + nodeSelector: + kubernetes.io/os: linux + +# Enable analytics (optional) +analytics: + enabled: true +``` + +Then install: + +```bash +helm install openebs-localpv openebs-localpv/localpv-provisioner \ + --namespace openebs \ + --create-namespace \ + -f pvc-manager-values.yaml +``` + +## Verification + +After installation, verify that PVC Manager is running: + +```bash +# Check DaemonSet status +kubectl get daemonset -n openebs -l app=pvc-manager + +# Check PVC Manager pods +kubectl get pods -n openebs -l app=pvc-manager + +# Check Service +kubectl get svc -n openebs -l app=pvc-manager + +# Check logs +kubectl logs -n openebs -l app=pvc-manager +``` + +## PVC Manager vs Helper Pods + +When PVC Manager is enabled (`pvcManager.enabled=true`), the provisioner will: +- Use HTTP requests to PVC Manager for volume operations +- Avoid creating helper pods for each volume operation +- Provide better performance for volume provisioning + +When PVC Manager is disabled (`pvcManager.enabled=false`), the provisioner will: +- Fall back to the traditional helper pod approach +- Create a helper pod for each volume operation +- Maintain backward compatibility \ No newline at end of file diff --git a/deploy/helm/charts/templates/_helpers.tpl b/deploy/helm/charts/templates/_helpers.tpl index d49f1625..238308bb 100644 --- a/deploy/helm/charts/templates/_helpers.tpl +++ b/deploy/helm/charts/templates/_helpers.tpl @@ -18,6 +18,8 @@ If release name contains chart name it will be used as a full name. 
{{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else if contains "localpv" .Release.Name -}} +{{- printf "%s-provisioner" .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/deploy/helm/charts/templates/deployment.yaml b/deploy/helm/charts/templates/deployment.yaml index c631cf0b..a45a9c17 100644 --- a/deploy/helm/charts/templates/deployment.yaml +++ b/deploy/helm/charts/templates/deployment.yaml @@ -105,6 +105,16 @@ spec: - name: OPENEBS_IO_IMAGE_PULL_SECRETS value: "{{- range $index, $secret := .Values.imagePullSecrets}}{{if $index}},{{end}}{{ $secret.name }}{{- end}}" {{- end }} + # PVC Manager configuration + {{- if .Values.pvcManager.enabled }} + - name: OPENEBS_IO_ENABLE_PVC_MANAGER + value: "true" + - name: OPENEBS_IO_PVC_MANAGER_PORT + value: "{{ .Values.pvcManager.port }}" + {{- else }} + - name: OPENEBS_IO_ENABLE_PVC_MANAGER + value: "false" + {{- end }} # Process name used for matching is limited to the 15 characters # present in the pgrep output. # So fullname can't be used here with pgrep (>15 chars).A regular expression diff --git a/deploy/helm/charts/templates/pvc-manager-daemonset.yaml b/deploy/helm/charts/templates/pvc-manager-daemonset.yaml new file mode 100644 index 00000000..83360697 --- /dev/null +++ b/deploy/helm/charts/templates/pvc-manager-daemonset.yaml @@ -0,0 +1,125 @@ +{{- if .Values.pvcManager.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "localpv.fullname" . }}-pvc-manager + namespace: {{ .Release.Namespace }} + labels: + {{- include "localpv.labels" . | nindent 4 }} + app: pvc-manager + component: localpv-provisioner + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 4 }} + {{- end }} + {{- with .Values.pvcManager.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: pvc-manager + release: {{ .Release.Name }} + component: localpv-provisioner + template: + metadata: + {{- with .Values.pvcManager.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: pvc-manager + release: {{ .Release.Name }} + component: localpv-provisioner + chart: {{ include "localpv.chart" . }} + heritage: {{ .Release.Service }} + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 8 }} + {{- end }} + {{- with .Values.pvcManager.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.loggingLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ .Values.pvcManager.rbac.serviceAccountName }} + hostNetwork: {{ .Values.pvcManager.hostNetwork }} + hostPID: {{ .Values.pvcManager.hostPID }} + dnsPolicy: {{ .Values.pvcManager.dnsPolicy }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: pvc-manager + image: "{{ with .Values.pvcManager.image.registry | default .Values.global.imageRegistry | trimSuffix "/" }}{{ . 
}}/{{ end }}{{ .Values.pvcManager.image.repository }}:{{ .Values.pvcManager.image.tag }}" + imagePullPolicy: {{ .Values.pvcManager.image.pullPolicy }} + args: + - "--listen-addr={{ .Values.pvcManager.listenAddr }}" + - "--log-level={{ .Values.pvcManager.logLevel }}" + ports: + - containerPort: {{ .Values.pvcManager.port }} + name: http + protocol: TCP + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + {{- toYaml .Values.pvcManager.securityContext | nindent 10 }} + volumeMounts: + - name: basepath + mountPath: {{ .Values.pvcManager.basePath }} + - name: dev + mountPath: /dev + - name: proc + mountPath: /host/proc + - name: sys + mountPath: /host/sys + livenessProbe: + httpGet: + path: /api/v1/health + port: {{ .Values.pvcManager.port }} + initialDelaySeconds: {{ .Values.pvcManager.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.pvcManager.healthCheck.periodSeconds }} + readinessProbe: + httpGet: + path: /api/v1/health + port: {{ .Values.pvcManager.port }} + initialDelaySeconds: {{ .Values.pvcManager.healthCheck.readinessInitialDelaySeconds }} + periodSeconds: {{ .Values.pvcManager.healthCheck.readinessPeriodSeconds }} + resources: + {{- toYaml .Values.pvcManager.resources | nindent 10 }} + volumes: + - name: basepath + hostPath: + path: {{ .Values.pvcManager.basePath }} + type: DirectoryOrCreate + - name: dev + hostPath: + path: /dev + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- with .Values.pvcManager.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.pvcManager.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/deploy/helm/charts/templates/pvc-manager-service.yaml b/deploy/helm/charts/templates/pvc-manager-service.yaml new file mode 100644 index 00000000..a9bd7fd1 --- /dev/null +++ b/deploy/helm/charts/templates/pvc-manager-service.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.pvcManager.enabled .Values.pvcManager.service.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "localpv.fullname" . }}-pvc-manager + namespace: {{ .Release.Namespace }} + labels: + {{- include "localpv.labels" . | nindent 4 }} + app: pvc-manager + component: localpv-provisioner + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 4 }} + {{- end }} + {{- with .Values.pvcManager.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.pvcManager.service.type }} + selector: + app: pvc-manager + {{- include "localpv.selectorLabels" . 
| nindent 4 }} + ports: + - port: {{ .Values.pvcManager.service.port }} + targetPort: {{ .Values.pvcManager.port }} + protocol: TCP + name: http +{{- end }} \ No newline at end of file diff --git a/deploy/helm/charts/templates/rbac.yaml b/deploy/helm/charts/templates/rbac.yaml index aeeed85e..c6523a43 100644 --- a/deploy/helm/charts/templates/rbac.yaml +++ b/deploy/helm/charts/templates/rbac.yaml @@ -115,3 +115,57 @@ subjects: namespace: {{ $.Release.Namespace }} {{- end }} {{- end }} +{{- if and .Values.pvcManager.enabled .Values.pvcManager.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.pvcManager.rbac.serviceAccountName }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "localpv.labels" . | nindent 4 }} + app: pvc-manager + component: localpv-provisioner + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 4 }} + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.pvcManager.rbac.serviceAccountName }} + labels: + {{- include "localpv.labels" . | nindent 4 }} + app: pvc-manager + component: localpv-provisioner + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 4 }} + {{- end }} +rules: +- apiGroups: [""] + resources: ["nodes", "persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.pvcManager.rbac.serviceAccountName }} + labels: + {{- include "localpv.labels" . | nindent 4 }} + app: pvc-manager + component: localpv-provisioner + {{- if .Values.extraLabels }} + {{- toYaml .Values.extraLabels | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.pvcManager.rbac.serviceAccountName }} +subjects: +- kind: ServiceAccount + name: {{ .Values.pvcManager.rbac.serviceAccountName }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/deploy/helm/charts/values.yaml b/deploy/helm/charts/values.yaml index 17ee6e8e..49cb8dc6 100644 --- a/deploy/helm/charts/values.yaml +++ b/deploy/helm/charts/values.yaml @@ -143,3 +143,66 @@ analytics: enabled: true # Specify in hours the duration after which a ping event needs to be sent. 
pingInterval: "24h" + +pvcManager: + # If true, enables the PVC Manager DaemonSet deployment + enabled: false + image: + registry: "" + repository: openebs/pvc-manager + tag: 1.0.0 + pullPolicy: IfNotPresent + # Port on which the PVC Manager HTTP server listens + port: 8080 + # Log level for PVC Manager + logLevel: info + # Listen address for PVC Manager HTTP server + listenAddr: "0.0.0.0:8080" + annotations: {} + podAnnotations: {} + # Labels to be added to PVC Manager DaemonSet pods + podLabels: + app: pvc-manager + component: localpv-provisioner + # Health check configuration + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 10 + readinessInitialDelaySeconds: 5 + readinessPeriodSeconds: 5 + # Resource limits and requests for PVC Manager + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 100m + memory: 64Mi + # Node selector for PVC Manager DaemonSet + nodeSelector: + kubernetes.io/os: linux + # Tolerations for PVC Manager DaemonSet + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + # Security context for PVC Manager containers + securityContext: + privileged: true + # Base path on the host for volume operations + basePath: "/var/openebs/local" + # Host network configuration + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + # Service configuration for PVC Manager + service: + enabled: true + type: ClusterIP + port: 8080 + annotations: {} + # RBAC configuration for PVC Manager + rbac: + create: true + serviceAccountName: openebs-pvc-manager diff --git a/deploy/kubectl/pvc-manager-daemonset.yaml b/deploy/kubectl/pvc-manager-daemonset.yaml new file mode 100644 index 00000000..24926410 --- /dev/null +++ b/deploy/kubectl/pvc-manager-daemonset.yaml @@ -0,0 +1,116 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pvc-manager + namespace: openebs + labels: + app: pvc-manager + component: localpv-provisioner + version: "1.0.0" +spec: + selector: + matchLabels: + app: pvc-manager + template: + metadata: + labels: + app: pvc-manager + component: localpv-provisioner + spec: + serviceAccountName: openebs-pvc-manager + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - name: pvc-manager + image: openebs/pvc-manager:1.0.0 + imagePullPolicy: IfNotPresent + args: + - "--listen-addr=0.0.0.0:8080" + - "--log-level=info" + ports: + - containerPort: 8080 + name: http + protocol: TCP + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: true + volumeMounts: + - name: basepath + mountPath: /var/openebs/local + - name: dev + mountPath: /dev + - name: proc + mountPath: /host/proc + - name: sys + mountPath: /host/sys + livenessProbe: + httpGet: + path: /api/v1/health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /api/v1/health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 100m + memory: 64Mi + volumes: + - name: basepath + hostPath: + path: /var/openebs/local + type: DirectoryOrCreate + - name: dev + hostPath: + path: /dev + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + nodeSelector: + kubernetes.io/os: linux +--- 
+apiVersion: v1 +kind: Service +metadata: + name: pvc-manager + namespace: openebs + labels: + app: pvc-manager + component: localpv-provisioner +spec: + type: ClusterIP + selector: + app: pvc-manager + ports: + - port: 8080 + targetPort: 8080 + protocol: TCP + name: http \ No newline at end of file diff --git a/deploy/kubectl/pvc-manager-rbac.yaml b/deploy/kubectl/pvc-manager-rbac.yaml new file mode 100644 index 00000000..c32d9892 --- /dev/null +++ b/deploy/kubectl/pvc-manager-rbac.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: openebs-pvc-manager + namespace: openebs + labels: + app: pvc-manager + component: localpv-provisioner +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openebs-pvc-manager + labels: + app: pvc-manager + component: localpv-provisioner +rules: +- apiGroups: [""] + resources: ["nodes", "persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openebs-pvc-manager + labels: + app: pvc-manager + component: localpv-provisioner +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: openebs-pvc-manager +subjects: +- kind: ServiceAccount + name: openebs-pvc-manager + namespace: openebs \ No newline at end of file diff --git a/design/pvc-manager-architecture.md b/design/pvc-manager-architecture.md new file mode 100644 index 00000000..021e3523 --- /dev/null +++ b/design/pvc-manager-architecture.md @@ -0,0 +1,201 @@ +# Dynamic LocalPV Provisioner - PVC Manager Architecture + +This document describes the new PVC Manager architecture for the OpenEBS Dynamic LocalPV Provisioner. + +## Architecture Overview + +The new architecture replaces the helper pod creation mechanism with a DaemonSet-based PVC Manager service that runs on each node and provides HTTP API endpoints for volume operations. + +### Previous Architecture (Helper Pods) +``` +Provisioner -> Creates Helper Pod -> Pod performs mkdir/rm operations -> Pod terminates +``` + +### New Architecture (PVC Manager) +``` +Provisioner -> HTTP Request -> PVC Manager (DaemonSet) -> Direct filesystem operations +``` + +## Components + +### 1. PVC Manager Service +- **Location**: `cmd/pvc-manager/` +- **Purpose**: HTTP service running on each node via DaemonSet +- **Endpoints**: + - `POST /api/v1/volumes/create` - Create volume directory + - `POST /api/v1/volumes/delete` - Delete volume directory + - `POST /api/v1/volumes/quota` - Apply filesystem quota + - `GET /api/v1/health` - Health check + +### 2. PVC Manager Client +- **Location**: `cmd/provisioner-localpv/app/pvc_manager_client.go` +- **Purpose**: HTTP client for provisioner to communicate with PVC Manager + +### 3. Provisioner Updates +- **Modified files**: + - `cmd/provisioner-localpv/app/provisioner_hostpath.go` + - `cmd/provisioner-localpv/app/helper_pvc_manager.go` + - `cmd/provisioner-localpv/app/env.go` + +## Configuration + +### Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `OPENEBS_IO_ENABLE_PVC_MANAGER` | `true` | Enable PVC Manager mode | +| `OPENEBS_IO_PVC_MANAGER_PORT` | `8080` | PVC Manager service port | + +### Deployment + +1. **Deploy PVC Manager DaemonSet**: + ```bash + kubectl apply -f deploy/kubectl/pvc-manager-rbac.yaml + kubectl apply -f deploy/kubectl/pvc-manager-daemonset.yaml + ``` + +2. 
**Update provisioner configuration to enable PVC Manager**: + ```yaml + env: + - name: OPENEBS_IO_ENABLE_PVC_MANAGER + value: "true" + - name: OPENEBS_IO_PVC_MANAGER_PORT + value: "8080" + ``` + +## Benefits + +### 1. Performance +- **Faster volume operations**: No pod creation/termination overhead +- **Reduced API server load**: Fewer ephemeral pod objects +- **Lower resource usage**: Single long-running process per node + +### 2. Reliability +- **Persistent service**: Always available for volume operations +- **Better error handling**: Consistent HTTP API responses +- **Health monitoring**: Built-in health check endpoints + +### 3. Scalability +- **Reduced scheduling overhead**: No pod scheduling delays +- **Better resource utilization**: Fixed resource consumption per node +- **Improved throughput**: Concurrent volume operations support + +## Migration Path + +### Backward Compatibility +The provisioner supports both modes simultaneously: +- Set `OPENEBS_IO_ENABLE_PVC_MANAGER=true` to use PVC Manager +- Set `OPENEBS_IO_ENABLE_PVC_MANAGER=false` to use helper pods (legacy) + +### Migration Steps +1. Deploy PVC Manager DaemonSet +2. Verify PVC Manager is running on all nodes +3. Update provisioner with `OPENEBS_IO_ENABLE_PVC_MANAGER=true` +4. Test volume provisioning/deprovisioning +5. Monitor for any issues + +## API Specification + +### Create Volume Request +```json +{ + "name": "volume-name", + "path": "/var/openebs/local/volume-path", + "nodeAffinityLabels": { + "kubernetes.io/hostname": "node-1" + }, + "fsMode": "0777", + "commands": ["mkdir", "-m", "0777", "-p"] +} +``` + +### Apply Quota Request +```json +{ + "name": "volume-name", + "path": "/var/openebs/local/volume-path", + "softLimitGrace": "80%", + "hardLimitGrace": "90%", + "pvcStorage": 1073741824 +} +``` + +### Response Format +```json +{ + "success": true, + "message": "Volume created successfully" +} +``` + +## Building and Testing + +### Build Commands +```bash +# Build PVC Manager +make pvc-manager + +# Build PVC Manager image +make pvc-manager-image + +# Build both provisioner and PVC Manager +make all +``` + +### Testing +```bash +# Test PVC Manager health +curl http://node-hostname:8080/api/v1/health + +# Test volume creation (example) +curl -X POST http://node-hostname:8080/api/v1/volumes/create \ + -H "Content-Type: application/json" \ + -d '{"name":"test-vol","path":"/var/openebs/local/test-vol","nodeAffinityLabels":{"kubernetes.io/hostname":"node-1"},"commands":["mkdir","-p"]}' +``` + +## Security Considerations + +### RBAC Permissions +The PVC Manager requires minimal RBAC permissions: +- Read access to nodes and PVs for validation +- Event creation for logging + +### Network Security +- PVC Manager listens on host network for direct access +- Uses HTTP (consider HTTPS for production) +- Only accessible from within the cluster + +### Filesystem Security +- Runs with privileged security context (required for filesystem operations) +- Validates paths to prevent directory traversal attacks +- Ensures volumes are created under controlled base paths + +## Troubleshooting + +### Common Issues + +1. **PVC Manager not responding** + - Check DaemonSet status: `kubectl get ds pvc-manager -n openebs` + - Check pod logs: `kubectl logs -l app=pvc-manager -n openebs` + +2. **Volume creation failures** + - Check PVC Manager logs for filesystem errors + - Verify node has sufficient disk space + - Ensure base path exists and has correct permissions + +3. 
**Network connectivity issues** + - Verify PVC Manager port is accessible on nodes + - Check firewall rules + - Validate service configuration + +### Debug Commands +```bash +# Check PVC Manager status +kubectl get pods -l app=pvc-manager -n openebs + +# Get PVC Manager logs +kubectl logs -l app=pvc-manager -n openebs -f + +# Test connectivity +kubectl exec -it -- curl http://:8080/api/v1/health +``` \ No newline at end of file diff --git a/examples/pvc-manager-deployment/complete-example.yaml b/examples/pvc-manager-deployment/complete-example.yaml new file mode 100644 index 00000000..b4d33769 --- /dev/null +++ b/examples/pvc-manager-deployment/complete-example.yaml @@ -0,0 +1,338 @@ +# Complete example for deploying OpenEBS Dynamic LocalPV Provisioner with PVC Manager +# +# This example demonstrates the new PVC Manager architecture that replaces +# helper pods with a DaemonSet-based HTTP service for volume operations. +# +# Apply in order: +# 1. kubectl apply -f complete-example.yaml +# 2. kubectl apply -f test-application.yaml (see below) + +--- +# Namespace for OpenEBS components +apiVersion: v1 +kind: Namespace +metadata: + name: openebs + labels: + name: openebs + +--- +# ServiceAccount for PVC Manager +apiVersion: v1 +kind: ServiceAccount +metadata: + name: openebs-pvc-manager + namespace: openebs + labels: + app: pvc-manager + component: localpv-provisioner + +--- +# ClusterRole for PVC Manager +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openebs-pvc-manager + labels: + app: pvc-manager + component: localpv-provisioner +rules: +- apiGroups: [""] + resources: ["nodes", "persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- +# ClusterRoleBinding for PVC Manager +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openebs-pvc-manager + labels: + app: pvc-manager + component: localpv-provisioner +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: openebs-pvc-manager +subjects: +- kind: ServiceAccount + name: openebs-pvc-manager + namespace: openebs + +--- +# PVC Manager DaemonSet +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pvc-manager + namespace: openebs + labels: + app: pvc-manager + component: localpv-provisioner + version: "1.0.0" +spec: + selector: + matchLabels: + app: pvc-manager + template: + metadata: + labels: + app: pvc-manager + component: localpv-provisioner + spec: + serviceAccountName: openebs-pvc-manager + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - name: pvc-manager + image: openebs/pvc-manager:1.0.0 + imagePullPolicy: IfNotPresent + args: + - "--listen-addr=0.0.0.0:8080" + - "--log-level=info" + ports: + - containerPort: 8080 + name: http + protocol: TCP + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: true + volumeMounts: + - name: basepath + mountPath: /var/openebs/local + - name: dev + mountPath: /dev + - name: proc + mountPath: /host/proc + - name: sys + mountPath: /host/sys + livenessProbe: + httpGet: + path: /api/v1/health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /api/v1/health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + limits: 
+ cpu: 200m + memory: 128Mi + requests: + cpu: 100m + memory: 64Mi + volumes: + - name: basepath + hostPath: + path: /var/openebs/local + type: DirectoryOrCreate + - name: dev + hostPath: + path: /dev + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + nodeSelector: + kubernetes.io/os: linux + +--- +# ServiceAccount for LocalPV Provisioner +apiVersion: v1 +kind: ServiceAccount +metadata: + name: openebs-localpv-provisioner + namespace: openebs + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + +--- +# ClusterRole for LocalPV Provisioner +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openebs-localpv-provisioner + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner +rules: +- apiGroups: [""] + resources: ["nodes", "persistentvolumes", "persistentvolumeclaims"] + verbs: ["get", "list", "watch", "create", "delete", "patch", "update"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + +--- +# ClusterRoleBinding for LocalPV Provisioner +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openebs-localpv-provisioner + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: openebs-localpv-provisioner +subjects: +- kind: ServiceAccount + name: openebs-localpv-provisioner + namespace: openebs + +--- +# LocalPV Provisioner Deployment (with PVC Manager enabled) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openebs-localpv-provisioner + namespace: openebs + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: dev +spec: + selector: + matchLabels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: dev + spec: + serviceAccount: openebs-localpv-provisioner + containers: + - name: openebs-provisioner-hostpath + imagePullPolicy: IfNotPresent + image: openebs/provisioner-localpv:1.0.0 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OPENEBS_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPENEBS_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPENEBS_IO_ENABLE_ANALYTICS + value: "true" + - name: OPENEBS_IO_INSTALLER_TYPE + value: "openebs-operator" + - name: OPENEBS_IO_HELPER_IMAGE + value: "openebs/linux-utils:latest" + # Enable PVC Manager mode + - name: OPENEBS_IO_ENABLE_PVC_MANAGER + value: "true" + - name: OPENEBS_IO_PVC_MANAGER_PORT + value: "8080" + livenessProbe: + exec: + command: + - pgrep + - ".*provisioner" + initialDelaySeconds: 30 + periodSeconds: 60 + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + +--- +# StorageClass for HostPath LocalPV with PVC Manager +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-hostpath-pvc-manager + annotations: + openebs.io/cas-type: local + 
cas.openebs.io/config: | + - name: StorageType + value: "hostpath" + - name: BasePath + value: "/var/openebs/local" +provisioner: openebs.io/local +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true + +--- +# Example test application manifest (save as test-application.yaml) +# apiVersion: v1 +# kind: PersistentVolumeClaim +# metadata: +# name: local-hostpath-pvc +# namespace: default +# spec: +# storageClassName: openebs-hostpath-pvc-manager +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 5Gi +# +# --- +# apiVersion: v1 +# kind: Pod +# metadata: +# name: hello-local-hostpath-pod +# namespace: default +# spec: +# volumes: +# - name: local-storage +# persistentVolumeClaim: +# claimName: local-hostpath-pvc +# containers: +# - name: hello-container +# image: busybox +# command: +# - sh +# - -c +# - 'while true; do echo "`date` [`hostname`] Hello from OpenEBS Local PV." >> /mnt/store/greetings.txt; sleep $(($RANDOM % 5 + 300)); done' +# volumeMounts: +# - mountPath: /mnt/store +# name: local-storage \ No newline at end of file diff --git a/examples/pvc-manager-deployment/test-application.yaml b/examples/pvc-manager-deployment/test-application.yaml new file mode 100644 index 00000000..6c0f302e --- /dev/null +++ b/examples/pvc-manager-deployment/test-application.yaml @@ -0,0 +1,47 @@ +# Test application for OpenEBS LocalPV with PVC Manager +# This creates a PVC and pod to test the new PVC Manager architecture + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: local-hostpath-pvc-manager-test + namespace: default +spec: + storageClassName: openebs-hostpath-pvc-manager + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-local-hostpath-pvc-manager + namespace: default + labels: + app: hello-local-hostpath +spec: + volumes: + - name: local-storage + persistentVolumeClaim: + claimName: local-hostpath-pvc-manager-test + containers: + - name: hello-container + image: busybox:1.35 + command: + - sh + - -c + - 'while true; do echo "`date` [`hostname`] Hello from OpenEBS Local PV with PVC Manager." >> /mnt/store/greetings.txt; cat /mnt/store/greetings.txt; sleep 60; done' + volumeMounts: + - mountPath: /mnt/store + name: local-storage + resources: + limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 50m + memory: 32Mi + restartPolicy: Always \ No newline at end of file diff --git a/examples/pvc-manager-deployment/test-pvc-manager.sh b/examples/pvc-manager-deployment/test-pvc-manager.sh new file mode 100755 index 00000000..c8dd9ffc --- /dev/null +++ b/examples/pvc-manager-deployment/test-pvc-manager.sh @@ -0,0 +1,260 @@ +#!/bin/bash + +# Test script for PVC Manager architecture +# This script validates that the PVC Manager service is working correctly + +set -e + +echo "=== OpenEBS LocalPV PVC Manager Test Suite ===" +echo + +# Configuration +NAMESPACE="openebs" +PVC_MANAGER_PORT="8080" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Helper functions +log_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +check_dependencies() { + log_info "Checking dependencies..." + + if ! command -v kubectl &> /dev/null; then + log_error "kubectl not found. Please install kubectl." + exit 1 + fi + + if ! command -v curl &> /dev/null; then + log_error "curl not found. Please install curl." 
+ exit 1 + fi + + log_info "Dependencies check passed" +} + +check_namespace() { + log_info "Checking OpenEBS namespace..." + + if ! kubectl get namespace $NAMESPACE &> /dev/null; then + log_error "OpenEBS namespace '$NAMESPACE' not found" + exit 1 + fi + + log_info "OpenEBS namespace exists" +} + +check_pvc_manager_daemonset() { + log_info "Checking PVC Manager DaemonSet..." + + # Check if DaemonSet exists + if ! kubectl get daemonset pvc-manager -n $NAMESPACE &> /dev/null; then + log_error "PVC Manager DaemonSet not found" + exit 1 + fi + + # Check DaemonSet status + DESIRED=$(kubectl get daemonset pvc-manager -n $NAMESPACE -o jsonpath='{.status.desiredNumberScheduled}') + READY=$(kubectl get daemonset pvc-manager -n $NAMESPACE -o jsonpath='{.status.numberReady}') + + if [ "$DESIRED" != "$READY" ]; then + log_error "PVC Manager DaemonSet not ready. Desired: $DESIRED, Ready: $READY" + kubectl get pods -l app=pvc-manager -n $NAMESPACE + exit 1 + fi + + log_info "PVC Manager DaemonSet is ready ($READY/$DESIRED pods)" +} + +check_pvc_manager_health() { + log_info "Checking PVC Manager health endpoints..." + + # Get list of PVC Manager pods with their IPs + PODS=$(kubectl get pods -l app=pvc-manager -n $NAMESPACE -o jsonpath='{range .items[*]}{.metadata.name}{","}{.status.podIP}{"\n"}{end}') + + if [ -z "$PODS" ]; then + log_error "No PVC Manager pods found" + exit 1 + fi + + local success_count=0 + local total_count=0 + + echo "$PODS" | while IFS=',' read -r pod_name pod_ip; do + [ -z "$pod_name" ] && continue + total_count=$((total_count + 1)) + log_info "Testing PVC Manager health for pod: $pod_name" + + if [ -z "$pod_ip" ]; then + log_warn "Could not get IP for pod $pod_name" + continue + fi + + # Test health endpoint using pod IP + if curl -s -f "http://$pod_ip:$PVC_MANAGER_PORT/api/v1/health" > /dev/null; then + log_info "✓ Health check passed for pod $pod_name ($pod_ip)" + success_count=$((success_count + 1)) + else + log_warn "✗ Health check failed for pod $pod_name ($pod_ip)" + fi + done + + # Alternative: Test via service if available + if kubectl get service pvc-manager -n $NAMESPACE &> /dev/null; then + log_info "Testing PVC Manager service endpoint..." + if kubectl exec -n $NAMESPACE deployment/openebs-localpv-provisioner -- curl -s -f "http://pvc-manager.$NAMESPACE.svc.cluster.local:$PVC_MANAGER_PORT/api/v1/health" > /dev/null 2>&1; then + log_info "✓ Service health check passed" + else + log_warn "✗ Service health check failed" + fi + fi + + log_info "PVC Manager health checks completed" +} + +check_provisioner() { + log_info "Checking LocalPV Provisioner..." + + # Check if provisioner deployment exists + if ! kubectl get deployment openebs-localpv-provisioner -n $NAMESPACE &> /dev/null; then + log_error "LocalPV Provisioner deployment not found" + exit 1 + fi + + # Check provisioner status + REPLICAS=$(kubectl get deployment openebs-localpv-provisioner -n $NAMESPACE -o jsonpath='{.status.replicas}') + READY_REPLICAS=$(kubectl get deployment openebs-localpv-provisioner -n $NAMESPACE -o jsonpath='{.status.readyReplicas}') + + if [ "$REPLICAS" != "$READY_REPLICAS" ]; then + log_error "LocalPV Provisioner not ready. Replicas: $REPLICAS, Ready: $READY_REPLICAS" + exit 1 + fi + + log_info "LocalPV Provisioner is ready" +} + +test_pvc_creation() { + log_info "Testing PVC creation with PVC Manager..." 
+
+    # Create test PVC
+    local test_pvc_name="test-pvc-manager-$(date +%s)"
+
+    cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: $test_pvc_name
+  namespace: default
+spec:
+  storageClassName: openebs-hostpath-pvc-manager
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+
+    # Wait for the PVC to be provisioned and bound
+    local timeout=120
+    local elapsed=0
+
+    while [ $elapsed -lt $timeout ]; do
+        local status=$(kubectl get pvc $test_pvc_name -n default -o jsonpath='{.status.phase}' 2>/dev/null || echo "")
+
+        if [ "$status" = "Bound" ]; then
+            log_info "✓ PVC $test_pvc_name successfully bound"
+            break
+        elif [ "$status" = "Pending" ]; then
+            log_info "PVC $test_pvc_name is pending..."
+            sleep 5
+            elapsed=$((elapsed + 5))
+        else
+            log_error "PVC $test_pvc_name has unexpected status: $status"
+            kubectl describe pvc $test_pvc_name -n default
+            cleanup_test_pvc $test_pvc_name
+            exit 1
+        fi
+    done
+
+    if [ $elapsed -ge $timeout ]; then
+        log_error "Timeout waiting for PVC to be bound"
+        kubectl describe pvc $test_pvc_name -n default
+        cleanup_test_pvc $test_pvc_name
+        exit 1
+    fi
+
+    # Cleanup test PVC
+    cleanup_test_pvc $test_pvc_name
+    log_info "PVC creation test passed"
+}
+
+cleanup_test_pvc() {
+    local pvc_name=$1
+    log_info "Cleaning up test PVC: $pvc_name"
+    kubectl delete pvc $pvc_name -n default --ignore-not-found=true
+
+    # Wait for PVC to be deleted
+    local timeout=30
+    local elapsed=0
+
+    while kubectl get pvc $pvc_name -n default &> /dev/null && [ $elapsed -lt $timeout ]; do
+        sleep 2
+        elapsed=$((elapsed + 2))
+    done
+}
+
+show_logs() {
+    log_info "Showing recent PVC Manager logs..."
+    kubectl logs -l app=pvc-manager -n $NAMESPACE --tail=10 --prefix=true || true
+
+    echo
+    log_info "Showing recent Provisioner logs..."
+    kubectl logs -l name=openebs-localpv-provisioner -n $NAMESPACE --tail=10 --prefix=true || true
+}
+
+main() {
+    echo "Starting PVC Manager validation tests..."
+    echo
+
+    check_dependencies
+    check_namespace
+    check_pvc_manager_daemonset
+    check_pvc_manager_health
+    check_provisioner
+    test_pvc_creation
+
+    echo
+    log_info "=== All tests passed! PVC Manager is working correctly ==="
+    echo
+
+    if [ "${1:-}" = "--show-logs" ]; then
+        show_logs
+    fi
+}
+
+# Handle cleanup on script exit
+cleanup() {
+    if [ -n "${test_pvc_name:-}" ]; then
+        cleanup_test_pvc $test_pvc_name
+    fi
+}
+
+trap cleanup EXIT
+
+# Run tests
+main "$@"
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 2a2041f2..629e3740 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/openebs/dynamic-localpv-provisioner
 go 1.19
 
 require (
+	github.com/gorilla/mux v1.8.0
 	github.com/onsi/ginkgo/v2 v2.14.0
 	github.com/onsi/gomega v1.30.0
 	github.com/openebs/google-analytics-4 v0.3.0
diff --git a/go.sum b/go.sum
index 1aef4f2e..2898df0f 100644
--- a/go.sum
+++ b/go.sum
@@ -261,6 +261,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
 github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
diff --git a/scripts/update-reg-repo.sh b/scripts/update-reg-repo.sh
index b37c16b1..1231a334 100755
--- a/scripts/update-reg-repo.sh
+++ b/scripts/update-reg-repo.sh
@@ -9,6 +9,7 @@ VALUES_YAML="$CHART_DIR/values.yaml"
 NEW_REGISTRY="ghcr.io"
 NEW_REPOSITORY="openebs/dev"
+COMPONENT="provisioner-localpv"
 
 source "$SCRIPT_DIR/yq_utils.sh"
 source
"$SCRIPT_DIR/log.sh" @@ -20,9 +21,11 @@ Usage: $(basename "$0") [OPTIONS] Options: --registry The registry to be updated to. --repository The repository to be updated to. + --component The component to be updated (provisioner-localpv or pvc-manager). Examples: $(basename "$0") --registry ghcr.io --repository openebs/dev + $(basename "$0") --registry ghcr.io --repository openebs/dev --component pvc-manager EOF } @@ -43,6 +46,11 @@ while [ "$#" -gt 0 ]; do NEW_REPOSITORY=$1 shift ;; + --component) + shift + COMPONENT=$1 + shift + ;; *) help log_fatal "Unknown option: $1" @@ -58,5 +66,12 @@ if [ -z "${NEW_REPOSITORY:-}" ]; then log_fatal "Missing required flag: --repository" fi -yq_ibl ".localpv.image.registry = \"$NEW_REGISTRY\"" "$VALUES_YAML" -yq_ibl ".localpv.image.repository = \"$NEW_REPOSITORY\"" "$VALUES_YAML" +if [ "$COMPONENT" = "provisioner-localpv" ]; then + yq_ibl ".localpv.image.registry = \"$NEW_REGISTRY\"" "$VALUES_YAML" + yq_ibl ".localpv.image.repository = \"$NEW_REPOSITORY\"" "$VALUES_YAML" +elif [ "$COMPONENT" = "pvc-manager" ]; then + yq_ibl ".pvcManager.image.registry = \"$NEW_REGISTRY\"" "$VALUES_YAML" + yq_ibl ".pvcManager.image.repository = \"$NEW_REPOSITORY\"" "$VALUES_YAML" +else + log_fatal "Unknown component: $COMPONENT. Supported components: provisioner-localpv, pvc-manager" +fi \ No newline at end of file