From acc85568451fac3c0bef13f348c7bb6cebe6a958 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 11:18:18 +0800 Subject: [PATCH 01/13] feat(charts): add Helm chart for StreamNative MCP Server --- AGENTS.md | 2 + CLAUDE.md | 54 ++++++-- charts/snmcp/Chart.yaml | 18 +++ charts/snmcp/README.md | 116 +++++++++++++++++ charts/snmcp/templates/NOTES.txt | 37 ++++++ charts/snmcp/templates/_helpers.tpl | 72 +++++++++++ charts/snmcp/templates/configmap.yaml | 36 ++++++ charts/snmcp/templates/deployment.yaml | 144 +++++++++++++++++++++ charts/snmcp/templates/ingress.yaml | 45 +++++++ charts/snmcp/templates/service.yaml | 23 ++++ charts/snmcp/templates/serviceaccount.yaml | 16 +++ charts/snmcp/values.yaml | 129 ++++++++++++++++++ 12 files changed, 681 insertions(+), 11 deletions(-) create mode 100644 charts/snmcp/Chart.yaml create mode 100644 charts/snmcp/README.md create mode 100644 charts/snmcp/templates/NOTES.txt create mode 100644 charts/snmcp/templates/_helpers.tpl create mode 100644 charts/snmcp/templates/configmap.yaml create mode 100644 charts/snmcp/templates/deployment.yaml create mode 100644 charts/snmcp/templates/ingress.yaml create mode 100644 charts/snmcp/templates/service.yaml create mode 100644 charts/snmcp/templates/serviceaccount.yaml create mode 100644 charts/snmcp/values.yaml diff --git a/AGENTS.md b/AGENTS.md index 48390eb..9b40506 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -135,3 +135,5 @@ This file follows the AGENTS.md spec described in the Codex system message (scop --- Happy hacking! πŸš€ + +@CLAUDE.md as reference. \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index 4cf6127..730afd8 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -26,7 +26,7 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) ↓ Tool Handler (from builders) ↓ - Session Context (pkg/mcp/ctx.go) + Context Functions (pkg/mcp/ctx.go) ↓ Service Client (Kafka/Pulsar/SNCloud) ``` @@ -38,26 +38,36 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) - Sessions provide lazy-initialized clients for each service - Context functions (`pkg/mcp/ctx.go`) inject/retrieve sessions from request context -2. **Tool Builders** (`pkg/mcp/builders/`) +2. **Tool Builders Framework** (`pkg/mcp/builders/`) - `ToolBuilder` interface: `GetName()`, `GetRequiredFeatures()`, `BuildTools()`, `Validate()` - `BaseToolBuilder` provides common feature validation logic - - Each builder creates `[]server.ServerTool` with tool definitions and handlers - - Builders in `builders/kafka/` and `builders/pulsar/` implement service-specific tools + - `ToolRegistry` manages all tool builders with concurrent-safe registration + - `ToolBuildConfig` specifies build parameters (ReadOnly, Features, Options) + - `ToolMetadata` describes tool information (Name, Version, Description, Category, Tags) -3. **Tool Registration** (`pkg/mcp/*_tools.go`) +3. **Tool Builders Organization** + - `builders/kafka/` - Kafka-specific tool builders (connect, consume, groups, partitions, produce, schema_registry, topics) + - `builders/pulsar/` - Pulsar-specific tool builders (brokers, cluster, functions, namespace, schema, sinks, sources, subscription, tenant, topic, etc.) + - `builders/streamnative/` - StreamNative Cloud tool builders + +4. **Tool Registration** (`pkg/mcp/*_tools.go`) - Each `*_tools.go` file creates a builder, builds tools, and adds them to the server - Tools are conditionally registered based on `--features` flag - Feature constants defined in `pkg/mcp/features.go` -4. **PFTools - Functions as Tools** (`pkg/mcp/pftools/`) +5. 
**PFTools - Functions as Tools** (`pkg/mcp/pftools/`) - `PulsarFunctionManager` dynamically converts Pulsar Functions to MCP tools - Polls for function changes and auto-registers/unregisters tools - Circuit breaker pattern (`circuit_breaker.go`) for fault tolerance - Schema conversion (`schema.go`) for input/output handling +6. **Session Management** (`pkg/mcp/session/`) + - `pulsar_session_manager.go` - LRU session cache with TTL cleanup for multi-session mode + ### Key Design Patterns - **Builder Pattern**: Tool builders create tools based on features and read-only mode +- **Registry Pattern**: ToolRegistry provides centralized management of all builders - **Context Injection**: Sessions passed via `context.Context` using typed keys - **Feature Flags**: Tools enabled/disabled via string feature identifiers - **Circuit Breaker**: PFTools uses failure thresholds to prevent cascading failures @@ -106,7 +116,7 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) 4. **Get Session in Handler**: ```go - session := mcpCtx.GetKafkaSession(ctx) // or GetPulsarSession + session := mcp.GetKafkaSession(ctx) // or GetPulsarSession if session == nil { return mcp.NewToolResultError("session not found"), nil } @@ -115,10 +125,13 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) ## Session Context Access -Handlers receive sessions via context (see `pkg/mcp/internal/context/ctx.go`): -- `mcpCtx.GetKafkaSession(ctx)` β†’ `*kafka.Session` -- `mcpCtx.GetPulsarSession(ctx)` β†’ `*pulsar.Session` -- `mcpCtx.GetSNCloudSession(ctx)` β†’ `*config.Session` +Handlers receive sessions via context (see `pkg/mcp/ctx.go`): +- `mcp.GetKafkaSession(ctx)` β†’ `*kafka.Session` +- `mcp.GetPulsarSession(ctx)` β†’ `*pulsar.Session` +- `mcp.GetSNCloudSession(ctx)` β†’ `*config.Session` +- `mcp.GetSNCloudOrganization(ctx)` β†’ organization string +- `mcp.GetSNCloudInstance(ctx)` β†’ instance string +- `mcp.GetSNCloudCluster(ctx)` β†’ cluster string From sessions: - `session.GetAdminClient()` / `session.GetAdminV3Client()` for Pulsar admin @@ -148,6 +161,25 @@ Key files: - `pkg/mcp/session/pulsar_session_manager.go` - LRU session cache with TTL cleanup - `pkg/cmd/mcp/server.go` - Skips global PulsarSession when multi-session enabled +## Feature Flags + +Available feature flags (defined in `pkg/mcp/features.go`): + +| Feature | Description | +|---------|-------------| +| `all` | Enable all features | +| `all-kafka` | All Kafka features | +| `all-pulsar` | All Pulsar features | +| `kafka-client` | Kafka produce/consume | +| `kafka-admin` | Kafka admin operations | +| `kafka-admin-schema-registry` | Schema Registry | +| `kafka-admin-kafka-connect` | Kafka Connect | +| `pulsar-admin` | Pulsar admin operations | +| `pulsar-client` | Pulsar produce/consume | +| `pulsar-admin-*` | Various Pulsar admin features (brokers, clusters, functions, namespaces, etc.) 
| +| `streamnative-cloud` | StreamNative Cloud context management | +| `functions-as-tools` | Dynamic Pulsar Functions as MCP tools | + ## Error Handling - Wrap errors: `fmt.Errorf("failed to X: %w", err)` diff --git a/charts/snmcp/Chart.yaml b/charts/snmcp/Chart.yaml new file mode 100644 index 0000000..a36ecba --- /dev/null +++ b/charts/snmcp/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: snmcp +description: A Helm chart for StreamNative MCP Server with Multi-Session Pulsar support +type: application +version: 0.1.0 +appVersion: "0.0.1" +home: https://github.com/streamnative/streamnative-mcp-server +sources: + - https://github.com/streamnative/streamnative-mcp-server +maintainers: + - name: StreamNative + url: https://streamnative.io +keywords: + - mcp + - streamnative + - pulsar + - ai + - llm diff --git a/charts/snmcp/README.md b/charts/snmcp/README.md new file mode 100644 index 0000000..b55c526 --- /dev/null +++ b/charts/snmcp/README.md @@ -0,0 +1,116 @@ +# StreamNative MCP Server Helm Chart + +This Helm chart deploys the StreamNative MCP Server on Kubernetes with **Multi-Session Pulsar** mode. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.0+ +- External Pulsar cluster accessible from the Kubernetes cluster + +## Installation + +```bash +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=http://pulsar.example.com:8080 +``` + +## Configuration + +### Required Parameters + +| Parameter | Description | +|-----------|-------------| +| `pulsar.webServiceURL` | Pulsar web service URL (required) | + +### Server Configuration + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `server.readOnly` | `false` | Enable read-only mode | +| `server.features` | `[]` | Features to enable (default: all-pulsar) | +| `server.httpAddr` | `:9090` | HTTP server address | +| `server.httpPath` | `/mcp` | SSE endpoint path | + +### Session Configuration + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `session.cacheSize` | `100` | Max cached sessions | +| `session.ttlMinutes` | `30` | Session TTL before eviction | + +### Pulsar TLS Configuration + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `pulsar.tls.enabled` | `false` | Enable TLS for Pulsar | +| `pulsar.tls.secretName` | `""` | Secret containing TLS certs | +| `pulsar.tls.allowInsecureConnection` | `false` | Allow insecure TLS | +| `pulsar.tls.enableHostnameVerification` | `true` | Verify hostname | + +### Kubernetes Resources + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `replicaCount` | `1` | Number of replicas | +| `service.type` | `ClusterIP` | Service type | +| `service.port` | `9090` | Service port | +| `ingress.enabled` | `false` | Enable Ingress | +| `serviceAccount.create` | `true` | Create ServiceAccount | + +## Examples + +### Basic Installation + +```bash +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=http://pulsar:8080 +``` + +### With Ingress + +```bash +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=http://pulsar:8080 \ + --set ingress.enabled=true \ + --set ingress.hosts[0].host=mcp.example.com \ + --set ingress.hosts[0].paths[0].path=/ \ + --set ingress.hosts[0].paths[0].pathType=Prefix +``` + +### Read-Only Mode with Limited Features + +```bash +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=http://pulsar:8080 \ + --set server.readOnly=true \ + --set server.features="{pulsar-admin,pulsar-client}" +``` + +### With Pulsar TLS + 
+```bash +# First create a secret with TLS certs +kubectl create secret generic pulsar-tls \ + --from-file=ca.crt=./ca.crt \ + --from-file=tls.crt=./tls.crt \ + --from-file=tls.key=./tls.key + +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=https://pulsar:8443 \ + --set pulsar.tls.enabled=true \ + --set pulsar.tls.secretName=pulsar-tls \ + --set pulsar.tls.trustCertsFilePath=/etc/snmcp/tls/ca.crt +``` + +## Authentication + +This chart runs MCP Server in Multi-Session Pulsar mode. Each client request must include a valid Pulsar JWT token: + +```bash +curl -H "Authorization: Bearer " http://localhost:9090/mcp/sse +``` + +## License + +Apache License 2.0 diff --git a/charts/snmcp/templates/NOTES.txt b/charts/snmcp/templates/NOTES.txt new file mode 100644 index 0000000..e3f4fba --- /dev/null +++ b/charts/snmcp/templates/NOTES.txt @@ -0,0 +1,37 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +StreamNative MCP Server has been deployed! + +1. Get the application URL: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ $.Values.server.httpPath }}/sse +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "snmcp.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "http://$NODE_IP:$NODE_PORT{{ .Values.server.httpPath }}/sse" +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running: + kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "snmcp.fullname" . }} + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "snmcp.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "http://$SERVICE_IP:{{ .Values.service.port }}{{ .Values.server.httpPath }}/sse" +{{- else if contains "ClusterIP" .Values.service.type }} + kubectl port-forward svc/{{ include "snmcp.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} -n {{ .Release.Namespace }} + echo "http://localhost:{{ .Values.service.port }}{{ .Values.server.httpPath }}/sse" +{{- end }} + +2. Connect MCP clients with Authorization header: + curl -H "Authorization: Bearer " + +3. This deployment is configured for Multi-Session Pulsar mode. + Each request must include a valid Pulsar JWT token in the Authorization header. + +Configuration: + - Pulsar Web Service URL: {{ .Values.pulsar.webServiceURL }} + - Session Cache Size: {{ .Values.session.cacheSize }} + - Session TTL: {{ .Values.session.ttlMinutes }} minutes + - Read-Only Mode: {{ .Values.server.readOnly }} diff --git a/charts/snmcp/templates/_helpers.tpl b/charts/snmcp/templates/_helpers.tpl new file mode 100644 index 0000000..e214e7a --- /dev/null +++ b/charts/snmcp/templates/_helpers.tpl @@ -0,0 +1,72 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "snmcp.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+*/}} +{{- define "snmcp.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "snmcp.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "snmcp.labels" -}} +helm.sh/chart: {{ include "snmcp.chart" . }} +{{ include "snmcp.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "snmcp.selectorLabels" -}} +app.kubernetes.io/name: {{ include "snmcp.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "snmcp.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "snmcp.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Get the image tag +*/}} +{{- define "snmcp.imageTag" -}} +{{- default .Chart.AppVersion .Values.image.tag }} +{{- end }} diff --git a/charts/snmcp/templates/configmap.yaml b/charts/snmcp/templates/configmap.yaml new file mode 100644 index 0000000..b446837 --- /dev/null +++ b/charts/snmcp/templates/configmap.yaml @@ -0,0 +1,36 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "snmcp.fullname" . }} + labels: + {{- include "snmcp.labels" . 
| nindent 4 }} +data: + SNMCP_PULSAR_WEB_SERVICE_URL: {{ .Values.pulsar.webServiceURL | quote }} + {{- if .Values.pulsar.serviceURL }} + SNMCP_PULSAR_SERVICE_URL: {{ .Values.pulsar.serviceURL | quote }} + {{- end }} + SNMCP_SESSION_CACHE_SIZE: {{ .Values.session.cacheSize | quote }} + SNMCP_SESSION_TTL_MINUTES: {{ .Values.session.ttlMinutes | quote }} + SNMCP_HTTP_ADDR: {{ .Values.server.httpAddr | quote }} + SNMCP_HTTP_PATH: {{ .Values.server.httpPath | quote }} + SNMCP_READ_ONLY: {{ .Values.server.readOnly | quote }} + {{- if .Values.server.features }} + SNMCP_FEATURES: {{ .Values.server.features | join "," | quote }} + {{- end }} + {{- if .Values.pulsar.tls.enabled }} + SNMCP_PULSAR_TLS_ALLOW_INSECURE: {{ .Values.pulsar.tls.allowInsecureConnection | quote }} + SNMCP_PULSAR_TLS_HOSTNAME_VERIFICATION: {{ .Values.pulsar.tls.enableHostnameVerification | quote }} + {{- if .Values.pulsar.tls.trustCertsFilePath }} + SNMCP_PULSAR_TLS_TRUST_CERTS_PATH: {{ .Values.pulsar.tls.trustCertsFilePath | quote }} + {{- end }} + {{- if .Values.pulsar.tls.certFile }} + SNMCP_PULSAR_TLS_CERT_FILE: {{ .Values.pulsar.tls.certFile | quote }} + {{- end }} + {{- if .Values.pulsar.tls.keyFile }} + SNMCP_PULSAR_TLS_KEY_FILE: {{ .Values.pulsar.tls.keyFile | quote }} + {{- end }} + {{- end }} diff --git a/charts/snmcp/templates/deployment.yaml b/charts/snmcp/templates/deployment.yaml new file mode 100644 index 0000000..b4c8956 --- /dev/null +++ b/charts/snmcp/templates/deployment.yaml @@ -0,0 +1,144 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "snmcp.fullname" . }} + labels: + {{- include "snmcp.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "snmcp.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "snmcp.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "snmcp.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ include "snmcp.imageTag" . 
}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - sse + - --use-external-pulsar + - --multi-session-pulsar + - --pulsar-web-service-url + - "$(SNMCP_PULSAR_WEB_SERVICE_URL)" + {{- if .Values.pulsar.serviceURL }} + - --pulsar-service-url + - "$(SNMCP_PULSAR_SERVICE_URL)" + {{- end }} + - --session-cache-size + - "$(SNMCP_SESSION_CACHE_SIZE)" + - --session-ttl-minutes + - "$(SNMCP_SESSION_TTL_MINUTES)" + - --http-addr + - "$(SNMCP_HTTP_ADDR)" + - --http-path + - "$(SNMCP_HTTP_PATH)" + {{- if .Values.server.readOnly }} + - --read-only + {{- end }} + {{- if .Values.server.features }} + - --features + - "$(SNMCP_FEATURES)" + {{- end }} + {{- if .Values.pulsar.tls.enabled }} + - --pulsar-tls-allow-insecure-connection={{ .Values.pulsar.tls.allowInsecureConnection }} + - --pulsar-tls-enable-hostname-verification={{ .Values.pulsar.tls.enableHostnameVerification }} + {{- if .Values.pulsar.tls.trustCertsFilePath }} + - --pulsar-tls-trust-certs-file-path + - "$(SNMCP_PULSAR_TLS_TRUST_CERTS_PATH)" + {{- end }} + {{- if .Values.pulsar.tls.certFile }} + - --pulsar-tls-cert-file + - "$(SNMCP_PULSAR_TLS_CERT_FILE)" + {{- end }} + {{- if .Values.pulsar.tls.keyFile }} + - --pulsar-tls-key-file + - "$(SNMCP_PULSAR_TLS_KEY_FILE)" + {{- end }} + {{- end }} + {{- if .Values.logging.enabled }} + - --log-file + - {{ .Values.logging.logFile | quote }} + {{- end }} + envFrom: + - configMapRef: + name: {{ include "snmcp.fullname" . }} + ports: + - name: http + containerPort: 9090 + protocol: TCP + livenessProbe: + httpGet: + path: {{ .Values.server.httpPath }}/sse + port: http + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: {{ .Values.server.httpPath }}/sse + port: http + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled }} + volumeMounts: + {{- if and .Values.pulsar.tls.enabled .Values.pulsar.tls.secretName }} + - name: pulsar-tls + mountPath: /etc/snmcp/tls + readOnly: true + {{- end }} + {{- if .Values.logging.enabled }} + - name: logs + mountPath: /tmp + {{- end }} + {{- end }} + {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled }} + volumes: + {{- if and .Values.pulsar.tls.enabled .Values.pulsar.tls.secretName }} + - name: pulsar-tls + secret: + secretName: {{ .Values.pulsar.tls.secretName }} + {{- end }} + {{- if .Values.logging.enabled }} + - name: logs + emptyDir: {} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/snmcp/templates/ingress.yaml b/charts/snmcp/templates/ingress.yaml new file mode 100644 index 0000000..b214f9f --- /dev/null +++ b/charts/snmcp/templates/ingress.yaml @@ -0,0 +1,45 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "snmcp.fullname" . }} + labels: + {{- include "snmcp.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "snmcp.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/snmcp/templates/service.yaml b/charts/snmcp/templates/service.yaml new file mode 100644 index 0000000..1cfd105 --- /dev/null +++ b/charts/snmcp/templates/service.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "snmcp.fullname" . }} + labels: + {{- include "snmcp.labels" . | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "snmcp.selectorLabels" . | nindent 4 }} diff --git a/charts/snmcp/templates/serviceaccount.yaml b/charts/snmcp/templates/serviceaccount.yaml new file mode 100644 index 0000000..028260d --- /dev/null +++ b/charts/snmcp/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{/* +Copyright 2025 StreamNative +SPDX-License-Identifier: Apache-2.0 +*/}} +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "snmcp.serviceAccountName" . }} + labels: + {{- include "snmcp.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/snmcp/values.yaml b/charts/snmcp/values.yaml new file mode 100644 index 0000000..4a0880b --- /dev/null +++ b/charts/snmcp/values.yaml @@ -0,0 +1,129 @@ +# StreamNative MCP Server Helm Chart Values +# This chart deploys MCP Server in Multi-Session Pulsar mode + +# Image configuration +image: + repository: streamnative/snmcp + tag: "" # Defaults to chart appVersion + pullPolicy: IfNotPresent + pullSecrets: [] + +# Number of replicas +replicaCount: 1 + +# Server configuration +server: + # Enable read-only mode (disables write operations) + readOnly: false + # Features to enable (e.g., "all-pulsar", "pulsar-admin", "pulsar-client") + # Leave empty to use default (all-pulsar) + features: [] + # HTTP server address + httpAddr: ":9090" + # HTTP server path for SSE endpoint + httpPath: "/mcp" + +# Pulsar cluster configuration (required) +pulsar: + # Pulsar web service URL (required) + webServiceURL: "" + # Pulsar broker service URL (optional, for client operations) + serviceURL: "" + # TLS configuration for Pulsar connection + tls: + enabled: false + # Name of existing secret containing TLS certs + secretName: "" + # Allow insecure TLS connection + allowInsecureConnection: false + # Enable hostname verification + enableHostnameVerification: true + # Path to trust certs file (mounted from secret) + trustCertsFilePath: "" + # Path to client cert file (mounted from secret) + certFile: "" + # Path to client key file (mounted from secret) + keyFile: "" + +# Session management configuration +# Note: Multi-session mode is always enabled in this chart +session: + # Maximum number of cached Pulsar sessions + cacheSize: 100 + # Session TTL in minutes before eviction + ttlMinutes: 30 + +# Kubernetes Service configuration +service: + type: ClusterIP + port: 9090 + annotations: {} + +# Ingress configuration +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: mcp.example.com + paths: + - path: / + pathType: Prefix + tls: [] + # - secretName: mcp-tls + # hosts: + # - mcp.example.com + +# ServiceAccount configuration +serviceAccount: + # Create a service account + create: true + # Annotations for the service account + annotations: {} + # Name of the service account (auto-generated if empty) + name: "" + +# Pod resource limits and requests +resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# Pod annotations +podAnnotations: {} + +# Pod security context +podSecurityContext: + fsGroup: 1000 + +# Container security context +securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + +# Node selector +nodeSelector: {} + +# Tolerations +tolerations: [] + +# Affinity rules +affinity: {} + +# Logging configuration +logging: + enabled: false + # Log file path (inside container) + logFile: "/tmp/snmcp.log" + +# Reserved for future Multi-Session Kafka support +# kafka: +# enabled: false +# bootstrapServers: "" From 38fab48079ff7f3dd3b5505e3efe01e393ce66a1 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 12:00:14 +0800 Subject: [PATCH 02/13] refactor(server): improve SSE server structure and add health endpoints --- .github/workflows/lint.yaml | 4 +- .golangci.yml | 19 +-- charts/snmcp/README.md | 7 +- charts/snmcp/templates/deployment.yaml | 4 +- charts/snmcp/values.yaml | 2 +- pkg/cmd/mcp/sse.go | 156 +++++++++++++------------ pkg/schema/common.go | 
2 +- 7 files changed, 107 insertions(+), 87 deletions(-) diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index a37813e..7b2451f 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -24,7 +24,7 @@ jobs: go mod verify go mod download - LINT_VERSION=1.64.8 + LINT_VERSION=2.7.2 curl -fsSL https://github.com/golangci/golangci-lint/releases/download/v${LINT_VERSION}/golangci-lint-${LINT_VERSION}-linux-amd64.tar.gz | \ tar xz --strip-components 1 --wildcards \*/golangci-lint mkdir -p bin && mv golangci-lint bin/ @@ -45,6 +45,6 @@ jobs: assert-nothing-changed go fmt ./... assert-nothing-changed go mod tidy - bin/golangci-lint run --out-format=colored-line-number --timeout=3m || STATUS=$? + bin/golangci-lint run --timeout=3m || STATUS=$? exit $STATUS diff --git a/.golangci.yml b/.golangci.yml index 43e3d62..54324e5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,3 +1,5 @@ +version: "2" + run: timeout: 5m tests: true @@ -8,13 +10,9 @@ linters: - govet - errcheck - staticcheck - - gofmt - - goimports - revive - ineffassign - - typecheck - unused - - gosimple - misspell - nakedret - bodyclose @@ -22,7 +20,14 @@ linters: - makezero - gosec +formatters: + enable: + - gofmt + - goimports + output: - formats: colored-line-number - print-issued-lines: true - print-linter-name: true + formats: + text: + path: stdout + print-issued-lines: true + print-linter-name: true diff --git a/charts/snmcp/README.md b/charts/snmcp/README.md index b55c526..592a782 100644 --- a/charts/snmcp/README.md +++ b/charts/snmcp/README.md @@ -30,7 +30,7 @@ helm install snmcp ./charts/snmcp \ | `server.readOnly` | `false` | Enable read-only mode | | `server.features` | `[]` | Features to enable (default: all-pulsar) | | `server.httpAddr` | `:9090` | HTTP server address | -| `server.httpPath` | `/mcp` | SSE endpoint path | +| `server.httpPath` | `/mcp` | Base path for SSE/message/health endpoints | ### Session Configuration @@ -111,6 +111,11 @@ This chart runs MCP Server in Multi-Session Pulsar mode. 
Each client request mus curl -H "Authorization: Bearer " http://localhost:9090/mcp/sse ``` +Health endpoints do not require authentication and can be used for liveness/readiness: + +- `GET http://localhost:9090/mcp/healthz` +- `GET http://localhost:9090/mcp/readyz` + ## License Apache License 2.0 diff --git a/charts/snmcp/templates/deployment.yaml b/charts/snmcp/templates/deployment.yaml index b4c8956..25f03f2 100644 --- a/charts/snmcp/templates/deployment.yaml +++ b/charts/snmcp/templates/deployment.yaml @@ -90,7 +90,7 @@ spec: protocol: TCP livenessProbe: httpGet: - path: {{ .Values.server.httpPath }}/sse + path: {{ trimSuffix "/" .Values.server.httpPath }}/healthz port: http initialDelaySeconds: 10 periodSeconds: 30 @@ -98,7 +98,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: {{ .Values.server.httpPath }}/sse + path: {{ trimSuffix "/" .Values.server.httpPath }}/readyz port: http initialDelaySeconds: 5 periodSeconds: 10 diff --git a/charts/snmcp/values.yaml b/charts/snmcp/values.yaml index 4a0880b..c3133c1 100644 --- a/charts/snmcp/values.yaml +++ b/charts/snmcp/values.yaml @@ -20,7 +20,7 @@ server: features: [] # HTTP server address httpAddr: ":9090" - # HTTP server path for SSE endpoint + # HTTP server base path for SSE, message, and health endpoints httpPath: "/mcp" # Pulsar cluster configuration (required) diff --git a/pkg/cmd/mcp/sse.go b/pkg/cmd/mcp/sse.go index 2620f3a..890b052 100644 --- a/pkg/cmd/mcp/sse.go +++ b/pkg/cmd/mcp/sse.go @@ -20,6 +20,7 @@ import ( "net/http" "os" "os/signal" + "path" "syscall" "time" @@ -29,8 +30,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/streamnative/streamnative-mcp-server/pkg/common" - "github.com/streamnative/streamnative-mcp-server/pkg/mcp" - context2 "github.com/streamnative/streamnative-mcp-server/pkg/mcp" + mcpctx "github.com/streamnative/streamnative-mcp-server/pkg/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/session" "github.com/streamnative/streamnative-mcp-server/pkg/pulsar" ) @@ -76,12 +76,12 @@ func runSseServer(configOpts *ServerOptions) error { } // 4. 
Set the context - ctx = context2.WithSNCloudSession(ctx, mcpServer.SNCloudSession) - ctx = context2.WithPulsarSession(ctx, mcpServer.PulsarSession) - ctx = context2.WithKafkaSession(ctx, mcpServer.KafkaSession) + ctx = mcpctx.WithSNCloudSession(ctx, mcpServer.SNCloudSession) + ctx = mcpctx.WithPulsarSession(ctx, mcpServer.PulsarSession) + ctx = mcpctx.WithKafkaSession(ctx, mcpServer.KafkaSession) if configOpts.Options.KeyFile != "" { if configOpts.Options.PulsarInstance != "" && configOpts.Options.PulsarCluster != "" { - err = mcp.SetContext(ctx, configOpts.Options, configOpts.Options.PulsarInstance, configOpts.Options.PulsarCluster) + err = mcpctx.SetContext(ctx, configOpts.Options, configOpts.Options.PulsarInstance, configOpts.Options.PulsarCluster) if err != nil { return errors.Wrap(err, "failed to set StreamNative Cloud context") } @@ -115,34 +115,42 @@ func runSseServer(configOpts *ServerOptions) error { logger.Info("Multi-session Pulsar mode enabled") } - sseServer := server.NewSSEServer( - mcpServer.MCPServer, - server.WithStaticBasePath(configOpts.HTTPPath), - server.WithSSEContextFunc(func(ctx context.Context, r *http.Request) context.Context { - c := context.WithValue(ctx, common.OptionsKey, configOpts.Options) - c = context2.WithKafkaSession(c, mcpServer.KafkaSession) - c = context2.WithSNCloudSession(c, mcpServer.SNCloudSession) + mux := http.NewServeMux() + httpServer := &http.Server{ + Addr: configOpts.HTTPAddr, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, // Prevent Slowloris attacks + } + sseContextFunc := func(ctx context.Context, r *http.Request) context.Context { + c := context.WithValue(ctx, common.OptionsKey, configOpts.Options) + c = mcpctx.WithKafkaSession(c, mcpServer.KafkaSession) + c = mcpctx.WithSNCloudSession(c, mcpServer.SNCloudSession) - // Handle per-user Pulsar sessions - if pulsarSessionManager != nil { - token := session.ExtractBearerToken(r) - // Token is already validated in auth middleware, this should always succeed - if pulsarSession, err := pulsarSessionManager.GetOrCreateSession(ctx, token); err == nil { - c = context2.WithPulsarSession(c, pulsarSession) - if token != "" { - c = session.WithUserTokenHash(c, pulsarSessionManager.HashTokenForLog(token)) - } - } else { - // Should not happen since middleware validates token first - logger.WithError(err).Error("Unexpected auth error after middleware validation") - // Don't set PulsarSession - tool handlers will fail gracefully with "session not found" + // Handle per-user Pulsar sessions + if pulsarSessionManager != nil { + token := session.ExtractBearerToken(r) + // Token is already validated in auth middleware, this should always succeed + if pulsarSession, err := pulsarSessionManager.GetOrCreateSession(ctx, token); err == nil { + c = mcpctx.WithPulsarSession(c, pulsarSession) + if token != "" { + c = session.WithUserTokenHash(c, pulsarSessionManager.HashTokenForLog(token)) } } else { - c = context2.WithPulsarSession(c, mcpServer.PulsarSession) + // Should not happen since middleware validates token first + logger.WithError(err).Error("Unexpected auth error after middleware validation") + // Don't set PulsarSession - tool handlers will fail gracefully with "session not found" } + } else { + c = mcpctx.WithPulsarSession(c, mcpServer.PulsarSession) + } - return c - }), + return c + } + sseServer := server.NewSSEServer( + mcpServer.MCPServer, + server.WithHTTPServer(httpServer), + server.WithStaticBasePath(configOpts.HTTPPath), + server.WithSSEContextFunc(sseContextFunc), ) // 4. 
Expose the full SSE URL to the user @@ -151,16 +159,15 @@ func runSseServer(configOpts *ServerOptions) error { fmt.Fprintf(os.Stderr, "StreamNative Cloud MCP Server listening on http://%s%s\n", configOpts.HTTPAddr, ssePath) - // 5. Run the HTTP listener in a goroutine - errCh := make(chan error, 1) - var httpServer *http.Server + healthPath := joinHTTPPath(configOpts.HTTPPath, "healthz") + readyPath := joinHTTPPath(configOpts.HTTPPath, "readyz") + authMiddleware := func(next http.Handler) http.Handler { + return next + } if pulsarSessionManager != nil { - // Multi-session mode: use custom handlers with auth middleware - mux := http.NewServeMux() - - // Auth middleware wrapper that validates token before processing - authMiddleware := func(next http.Handler) http.Handler { + // Multi-session mode: validate token before processing SSE/message requests + authMiddleware = func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { token := session.ExtractBearerToken(r) if token == "" { @@ -178,31 +185,21 @@ func runSseServer(configOpts *ServerOptions) error { next.ServeHTTP(w, r) }) } + logger.Info("SSE server started with authentication middleware") + } - // Mount handlers with auth middleware - mux.Handle(ssePath, authMiddleware(sseServer.SSEHandler())) - mux.Handle(msgPath, authMiddleware(sseServer.MessageHandler())) + mux.Handle(ssePath, authMiddleware(sseServer.SSEHandler())) + mux.Handle(msgPath, authMiddleware(sseServer.MessageHandler())) + mux.HandleFunc(healthPath, healthHandler("ok")) + mux.HandleFunc(readyPath, healthHandler("ready")) - // Start custom HTTP server - httpServer = &http.Server{ - Addr: configOpts.HTTPAddr, - Handler: mux, - ReadHeaderTimeout: 10 * time.Second, // Prevent Slowloris attacks + // 5. 
Run the HTTP listener in a goroutine + errCh := make(chan error, 1) + go func() { + if err := sseServer.Start(configOpts.HTTPAddr); err != nil && !errors.Is(err, http.ErrServerClosed) { + errCh <- err // bubble up real crashes } - go func() { - if err := httpServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { - errCh <- err - } - }() - logger.Info("SSE server started with authentication middleware") - } else { - // Non-multi-session mode: use default Start() - go func() { - if err := sseServer.Start(configOpts.HTTPAddr); err != nil && !errors.Is(err, http.ErrServerClosed) { - errCh <- err // bubble up real crashes - } - }() - } + }() // Give the server a moment to start time.Sleep(100 * time.Millisecond) @@ -226,20 +223,10 @@ func runSseServer(configOpts *ServerOptions) error { pulsarSessionManager.Stop() } - // Shut down the HTTP server - if httpServer != nil { - // Multi-session mode: shut down custom HTTP server - if err := httpServer.Shutdown(shCtx); err != nil { - if !errors.Is(err, http.ErrServerClosed) { - logger.Errorf("Error shutting down HTTP server: %v", err) - } - } - } else { - // Non-multi-session mode: shut down SSE server - if err := sseServer.Shutdown(shCtx); err != nil { - if !errors.Is(err, http.ErrServerClosed) { - logger.Errorf("Error shutting down SSE server: %v", err) - } + // Shut down the SSE server (also closes the underlying HTTP server) + if err := sseServer.Shutdown(shCtx); err != nil { + if !errors.Is(err, http.ErrServerClosed) { + logger.Errorf("Error shutting down SSE server: %v", err) } } @@ -254,3 +241,26 @@ func runSseServer(configOpts *ServerOptions) error { fmt.Fprintln(os.Stderr, "SSE server stopped gracefully") return nil } + +func joinHTTPPath(basePath string, suffix string) string { + joined := path.Join(basePath, suffix) + if joined == "" { + return "/" + suffix + } + if joined[0] != '/' { + return "/" + joined + } + return joined +} + +func healthHandler(status string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(status)) + } +} diff --git a/pkg/schema/common.go b/pkg/schema/common.go index 5bf712e..569eae5 100644 --- a/pkg/schema/common.go +++ b/pkg/schema/common.go @@ -20,7 +20,7 @@ import ( "github.com/apache/pulsar-client-go/pulsar" ) -// GetSchemaType θΏ”ε›žSchemaη±»εž‹ηš„ε­—η¬¦δΈ²θ‘¨η€Ί +// GetSchemaType returns the string representation of a schema type. 
func GetSchemaType(schemaType pulsar.SchemaType) string { switch schemaType { case pulsar.AVRO: From 9c960218878a3078c0c569da1f7305502557daca Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 13:27:30 +0800 Subject: [PATCH 03/13] feat(e2e): add e2e testing setup with Pulsar and Helm --- .gitignore | 1 + charts/snmcp/e2e/.gitkeep | 0 charts/snmcp/e2e/pulsar-values.yaml | 12 ++ charts/snmcp/e2e/test-secret.key | 1 + charts/snmcp/e2e/test-tokens.env | 2 + cmd/snmcp-e2e/.gitkeep | 0 scripts/.gitkeep | 0 scripts/e2e-test.sh | 187 ++++++++++++++++++++++++++++ 8 files changed, 203 insertions(+) create mode 100644 charts/snmcp/e2e/.gitkeep create mode 100644 charts/snmcp/e2e/pulsar-values.yaml create mode 100644 charts/snmcp/e2e/test-secret.key create mode 100644 charts/snmcp/e2e/test-tokens.env create mode 100644 cmd/snmcp-e2e/.gitkeep create mode 100644 scripts/.gitkeep create mode 100755 scripts/e2e-test.sh diff --git a/.gitignore b/.gitignore index 577a846..9a5e603 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ vendor .cursor/ agents/ .serena/ +.envrc diff --git a/charts/snmcp/e2e/.gitkeep b/charts/snmcp/e2e/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/charts/snmcp/e2e/pulsar-values.yaml b/charts/snmcp/e2e/pulsar-values.yaml new file mode 100644 index 0000000..16f6608 --- /dev/null +++ b/charts/snmcp/e2e/pulsar-values.yaml @@ -0,0 +1,12 @@ +pulsar: + image: snstage/pulsar-all:4.1.0.10 + ports: + broker: 6650 + web: 8080 + auth: + enabled: true + tokenSecretKeyFile: /pulsarctl/test/auth/token/secret.key + superUserRoles: + - admin + resources: + javaToolOptions: "-Xms256m -Xmx512m" diff --git a/charts/snmcp/e2e/test-secret.key b/charts/snmcp/e2e/test-secret.key new file mode 100644 index 0000000..40f5ee8 --- /dev/null +++ b/charts/snmcp/e2e/test-secret.key @@ -0,0 +1 @@ +snmcp-e2e-test-secret-key diff --git a/charts/snmcp/e2e/test-tokens.env b/charts/snmcp/e2e/test-tokens.env new file mode 100644 index 0000000..ea8faba --- /dev/null +++ b/charts/snmcp/e2e/test-tokens.env @@ -0,0 +1,2 @@ +ADMIN_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiIsImlhdCI6MTcwMDAwMDAwMCwiZXhwIjo0MTAyNDQ0ODAwfQ.B-SBQ921h_j8UCPcpfgZgqcIz5TopSE6i6tQFGdD7Ls +TEST_USER_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0LXVzZXIiLCJpYXQiOjE3MDAwMDAwMDAsImV4cCI6NDEwMjQ0NDgwMH0.v2r1TsfZ_vEmPNeF9q5O5GUsnskPtLXogbQ_FIkCffo diff --git a/cmd/snmcp-e2e/.gitkeep b/cmd/snmcp-e2e/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/scripts/.gitkeep b/scripts/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh new file mode 100755 index 0000000..74131c7 --- /dev/null +++ b/scripts/e2e-test.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +E2E_DIR="${ROOT_DIR}/charts/snmcp/e2e" + +PULSAR_CONTAINER="${PULSAR_CONTAINER:-pulsar-standalone}" +PULSAR_IMAGE="${PULSAR_IMAGE:-snstage/pulsar-all:4.1.0.10}" +KIND_NETWORK="${KIND_NETWORK:-kind}" +PULSAR_WEB_PORT="${PULSAR_WEB_PORT:-8080}" +PULSAR_BROKER_PORT="${PULSAR_BROKER_PORT:-6650}" +PULSAR_STARTUP_TIMEOUT="${PULSAR_STARTUP_TIMEOUT:-180}" +PULSAR_STARTUP_INTERVAL="${PULSAR_STARTUP_INTERVAL:-3}" + +SNMCP_RELEASE="${SNMCP_RELEASE:-snmcp}" +SNMCP_NAMESPACE="${SNMCP_NAMESPACE:-default}" +SNMCP_CHART_DIR="${SNMCP_CHART_DIR:-${ROOT_DIR}/charts/snmcp}" +SNMCP_FEATURES="${SNMCP_FEATURES:-pulsar-admin,pulsar-client}" +SNMCP_IMAGE_REPO="${SNMCP_IMAGE_REPO:-}" +SNMCP_IMAGE_TAG="${SNMCP_IMAGE_TAG:-}" +SNMCP_WAIT_TIMEOUT="${SNMCP_WAIT_TIMEOUT:-180s}" + +TOKEN_ENV_FILE="${TOKEN_ENV_FILE:-${E2E_DIR}/test-tokens.env}" +TOKEN_SECRET_FILE="${TOKEN_SECRET_FILE:-${E2E_DIR}/test-secret.key}" + +log() { + echo "[e2e] $*" +} + +die() { + echo "[e2e] $*" >&2 + exit 1 +} + +require_cmd() { + command -v "$1" >/dev/null 2>&1 || die "missing command: $1" +} + +load_tokens() { + [[ -f "$TOKEN_ENV_FILE" ]] || die "missing token env file: $TOKEN_ENV_FILE" + set -a + # shellcheck disable=SC1090 + source "$TOKEN_ENV_FILE" + set +a + [[ -n "${ADMIN_TOKEN:-}" ]] || die "ADMIN_TOKEN not set in $TOKEN_ENV_FILE" +} + +ensure_kind_network() { + docker network inspect "$KIND_NETWORK" >/dev/null 2>&1 || die "missing kind network: $KIND_NETWORK" +} + +pulsar_ip() { + docker inspect -f "{{.NetworkSettings.Networks.${KIND_NETWORK}.IPAddress}}" "$PULSAR_CONTAINER" +} + +setup_pulsar() { + require_cmd docker + require_cmd curl + ensure_kind_network + load_tokens + [[ -f "$TOKEN_SECRET_FILE" ]] || die "missing secret key file: $TOKEN_SECRET_FILE" + + if docker ps -a --format '{{.Names}}' | grep -qx "$PULSAR_CONTAINER"; then + log "removing existing container: $PULSAR_CONTAINER" + docker rm -f "$PULSAR_CONTAINER" >/dev/null + fi + + log "starting pulsar container: $PULSAR_CONTAINER" + docker run -d \ + --name "$PULSAR_CONTAINER" \ + --network "$KIND_NETWORK" \ + -e PULSAR_PREFIX_authenticationEnabled=true \ + -e PULSAR_PREFIX_authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderToken \ + -e PULSAR_PREFIX_authorizationEnabled=true \ + -e PULSAR_PREFIX_superUserRoles=admin \ + -e PULSAR_PREFIX_tokenSecretKey=file:///pulsarctl/test/auth/token/secret.key \ + -e PULSAR_PREFIX_brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationToken \ + -e PULSAR_PREFIX_brokerClientAuthenticationParameters="token:${ADMIN_TOKEN}" \ + -v "$TOKEN_SECRET_FILE:/pulsarctl/test/auth/token/secret.key:ro" \ + "$PULSAR_IMAGE" \ + bash -lc 'set -- $(hostname -i); export PULSAR_PREFIX_advertisedAddress=$1; exec bin/pulsar standalone' \ + >/dev/null + + log "waiting for pulsar to be ready" + local deadline=$((SECONDS + PULSAR_STARTUP_TIMEOUT)) + while ((SECONDS < deadline)); do + local ip + if ip="$(pulsar_ip 2>/dev/null)" && [[ -n "$ip" ]]; then + if curl -fsS "http://${ip}:${PULSAR_WEB_PORT}/admin/v2/clusters" \ + -H "Authorization: Bearer ${ADMIN_TOKEN}" >/dev/null; then + log "pulsar ready at ${ip}" + return 0 + fi + fi + sleep "$PULSAR_STARTUP_INTERVAL" + done + + die "pulsar not ready after ${PULSAR_STARTUP_TIMEOUT}s" +} + +deploy_mcp() { + require_cmd helm + require_cmd kubectl + require_cmd docker + ensure_kind_network + + local ip + ip="$(pulsar_ip)" + [[ -n "$ip" ]] || die "failed to resolve pulsar container IP" + + local helm_args=( + --namespace "$SNMCP_NAMESPACE" + 
--create-namespace + --set "pulsar.webServiceURL=http://${ip}:${PULSAR_WEB_PORT}" + --set "pulsar.serviceURL=pulsar://${ip}:${PULSAR_BROKER_PORT}" + --set "server.features={${SNMCP_FEATURES}}" + --wait + --timeout "$SNMCP_WAIT_TIMEOUT" + ) + + if [[ -n "$SNMCP_IMAGE_REPO" ]]; then + helm_args+=(--set "image.repository=${SNMCP_IMAGE_REPO}") + fi + if [[ -n "$SNMCP_IMAGE_TAG" ]]; then + helm_args+=(--set "image.tag=${SNMCP_IMAGE_TAG}") + fi + + log "deploying snmcp with helm" + helm upgrade --install "$SNMCP_RELEASE" "$SNMCP_CHART_DIR" "${helm_args[@]}" >/dev/null + + log "waiting for snmcp deployment rollout" + kubectl rollout status "deployment/${SNMCP_RELEASE}" \ + --namespace "$SNMCP_NAMESPACE" \ + --timeout "$SNMCP_WAIT_TIMEOUT" \ + >/dev/null + + log "snmcp deployed and ready" +} + +cleanup() { + require_cmd docker + + if command -v helm >/dev/null 2>&1; then + helm uninstall "$SNMCP_RELEASE" --namespace "$SNMCP_NAMESPACE" >/dev/null 2>&1 || true + fi + + if docker ps -a --format '{{.Names}}' | grep -qx "$PULSAR_CONTAINER"; then + docker rm -f "$PULSAR_CONTAINER" >/dev/null + fi +} + +usage() { + cat < + +Commands: + setup-pulsar Start Pulsar standalone with JWT auth on the kind network + deploy-mcp Deploy snmcp Helm chart and wait for readiness + cleanup Remove Pulsar container and uninstall snmcp release + all Run setup-pulsar then deploy-mcp +USAGE +} + +main() { + local cmd="${1:-}" + case "$cmd" in + setup-pulsar) + setup_pulsar + ;; + deploy-mcp) + deploy_mcp + ;; + cleanup) + cleanup + ;; + all) + setup_pulsar + deploy_mcp + ;; + *) + usage + exit 1 + ;; + esac +} + +main "$@" From a2ec45b9aff45129feb236feae883bc1cfbb368b Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 23:15:51 +0800 Subject: [PATCH 04/13] ci(e2e): add E2E workflow and update related configurations --- .github/workflows/e2e.yaml | 56 +++ charts/snmcp/e2e/pulsar-values.yaml | 2 +- charts/snmcp/e2e/test-secret.key | 2 +- charts/snmcp/e2e/test-tokens.env | 4 +- charts/snmcp/templates/configmap.yaml | 1 + charts/snmcp/templates/deployment.yaml | 12 +- charts/snmcp/values.yaml | 2 + cmd/snmcp-e2e/main.go | 536 +++++++++++++++++++++++++ scripts/e2e-test.sh | 80 +++- 9 files changed, 684 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/e2e.yaml create mode 100644 cmd/snmcp-e2e/main.go diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 0000000..fbeb483 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,56 @@ +name: E2E Tests + +on: + push: + paths: + - "charts/**" + - "cmd/snmcp-e2e/**" + - "scripts/e2e-test.sh" + - ".github/workflows/e2e.yaml" + pull_request: + paths: + - "charts/**" + - "cmd/snmcp-e2e/**" + - "scripts/e2e-test.sh" + - ".github/workflows/e2e.yaml" + workflow_dispatch: + +permissions: + contents: read + +jobs: + e2e: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + + - name: Set up Helm + uses: azure/setup-helm@v4 + + - name: Set up Kind + uses: helm/kind-action@v1 + + - name: Download dependencies + run: go mod download + + - name: Start Pulsar + run: ./scripts/e2e-test.sh setup-pulsar + + - name: Build and load snmcp image + run: ./scripts/e2e-test.sh build-image + + - name: Deploy snmcp + run: ./scripts/e2e-test.sh deploy-mcp + + - name: Run E2E tests + run: ./scripts/e2e-test.sh run-tests + + - name: Cleanup + if: always() + run: ./scripts/e2e-test.sh cleanup diff --git 
a/charts/snmcp/e2e/pulsar-values.yaml b/charts/snmcp/e2e/pulsar-values.yaml index 16f6608..f68cd91 100644 --- a/charts/snmcp/e2e/pulsar-values.yaml +++ b/charts/snmcp/e2e/pulsar-values.yaml @@ -1,5 +1,5 @@ pulsar: - image: snstage/pulsar-all:4.1.0.10 + image: apachepulsar/pulsar-all:4.1.0 ports: broker: 6650 web: 8080 diff --git a/charts/snmcp/e2e/test-secret.key b/charts/snmcp/e2e/test-secret.key index 40f5ee8..f3b893b 100644 --- a/charts/snmcp/e2e/test-secret.key +++ b/charts/snmcp/e2e/test-secret.key @@ -1 +1 @@ -snmcp-e2e-test-secret-key +snmcp-e2e-test-secret-key-32-bytes-long diff --git a/charts/snmcp/e2e/test-tokens.env b/charts/snmcp/e2e/test-tokens.env index ea8faba..1c47a00 100644 --- a/charts/snmcp/e2e/test-tokens.env +++ b/charts/snmcp/e2e/test-tokens.env @@ -1,2 +1,2 @@ -ADMIN_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiIsImlhdCI6MTcwMDAwMDAwMCwiZXhwIjo0MTAyNDQ0ODAwfQ.B-SBQ921h_j8UCPcpfgZgqcIz5TopSE6i6tQFGdD7Ls -TEST_USER_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0LXVzZXIiLCJpYXQiOjE3MDAwMDAwMDAsImV4cCI6NDEwMjQ0NDgwMH0.v2r1TsfZ_vEmPNeF9q5O5GUsnskPtLXogbQ_FIkCffo +ADMIN_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQxMDI0NDQ4MDAsImlhdCI6MTcwMDAwMDAwMCwic3ViIjoiYWRtaW4ifQ.fvMIzcCv16QvecEd8rJS6GZaJP_FeFw-XndtfRMfZyc +TEST_USER_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQxMDI0NDQ4MDAsImlhdCI6MTcwMDAwMDAwMCwic3ViIjoidGVzdC11c2VyIn0.gv49qzkZrtc-6aXMGxSGFpRLk_C3pnFI4SprgewhN54 diff --git a/charts/snmcp/templates/configmap.yaml b/charts/snmcp/templates/configmap.yaml index b446837..b355b8e 100644 --- a/charts/snmcp/templates/configmap.yaml +++ b/charts/snmcp/templates/configmap.yaml @@ -17,6 +17,7 @@ data: SNMCP_SESSION_TTL_MINUTES: {{ .Values.session.ttlMinutes | quote }} SNMCP_HTTP_ADDR: {{ .Values.server.httpAddr | quote }} SNMCP_HTTP_PATH: {{ .Values.server.httpPath | quote }} + SNMCP_CONFIG_DIR: {{ .Values.server.configDir | quote }} SNMCP_READ_ONLY: {{ .Values.server.readOnly | quote }} {{- if .Values.server.features }} SNMCP_FEATURES: {{ .Values.server.features | join "," | quote }} diff --git a/charts/snmcp/templates/deployment.yaml b/charts/snmcp/templates/deployment.yaml index 25f03f2..31474a0 100644 --- a/charts/snmcp/templates/deployment.yaml +++ b/charts/snmcp/templates/deployment.yaml @@ -106,8 +106,12 @@ spec: failureThreshold: 3 resources: {{- toYaml .Values.resources | nindent 12 }} - {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled }} + {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled .Values.server.configDir }} volumeMounts: + {{- if .Values.server.configDir }} + - name: config-dir + mountPath: {{ .Values.server.configDir | quote }} + {{- end }} {{- if and .Values.pulsar.tls.enabled .Values.pulsar.tls.secretName }} - name: pulsar-tls mountPath: /etc/snmcp/tls @@ -118,8 +122,12 @@ spec: mountPath: /tmp {{- end }} {{- end }} - {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled }} + {{- if or .Values.pulsar.tls.enabled .Values.logging.enabled .Values.server.configDir }} volumes: + {{- if .Values.server.configDir }} + - name: config-dir + emptyDir: {} + {{- end }} {{- if and .Values.pulsar.tls.enabled .Values.pulsar.tls.secretName }} - name: pulsar-tls secret: diff --git a/charts/snmcp/values.yaml b/charts/snmcp/values.yaml index c3133c1..6c642d9 100644 --- a/charts/snmcp/values.yaml +++ b/charts/snmcp/values.yaml @@ -22,6 +22,8 @@ server: httpAddr: ":9090" # HTTP server base path for SSE, message, and health endpoints httpPath: "/mcp" + # Config directory for snmcp state + configDir: 
"/var/lib/snmcp" # Pulsar cluster configuration (required) pulsar: diff --git a/cmd/snmcp-e2e/main.go b/cmd/snmcp-e2e/main.go new file mode 100644 index 0000000..e178077 --- /dev/null +++ b/cmd/snmcp-e2e/main.go @@ -0,0 +1,536 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/mcp" +) + +type config struct { + httpBaseURL string + adminToken string + testUserToken string + timeout time.Duration + verbose bool +} + +func main() { + cfg, err := parseConfig() + if err != nil { + fmt.Fprintf(os.Stderr, "config error: %v\n", err) + os.Exit(1) + } + + ctx, cancel := context.WithTimeout(context.Background(), cfg.timeout) + err = run(ctx, cfg) + cancel() + if err != nil { + fmt.Fprintf(os.Stderr, "e2e failed: %v\n", err) + os.Exit(1) + } + _, _ = fmt.Fprintln(os.Stdout, "e2e succeeded") +} + +func parseConfig() (config, error) { + var cfg config + flag.StringVar(&cfg.httpBaseURL, "http-base", getenv("E2E_HTTP_BASE", "http://127.0.0.1:9090/mcp"), "HTTP base URL for MCP endpoints") + flag.StringVar(&cfg.adminToken, "admin-token", getenv("ADMIN_TOKEN", ""), "Admin JWT token") + flag.StringVar(&cfg.testUserToken, "test-user-token", getenv("TEST_USER_TOKEN", ""), "Test user JWT token") + flag.DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "Overall timeout for the E2E run") + flag.BoolVar(&cfg.verbose, "verbose", getenvBool("E2E_VERBOSE", false), "Enable verbose logging") + flag.Parse() + + if cfg.adminToken == "" { + return config{}, errors.New("admin token is required") + } + if cfg.testUserToken == "" { + return config{}, errors.New("test-user token is required") + } + + normalized, err := normalizeBaseURL(cfg.httpBaseURL) + if err != nil { + return config{}, err + } + cfg.httpBaseURL = normalized + return cfg, nil +} + +func normalizeBaseURL(raw string) (string, error) { + parsed, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid http-base URL: %w", err) + } + if parsed.Scheme == "" || parsed.Host == "" { + return "", fmt.Errorf("invalid http-base URL: %s", raw) + } + parsed.Path = strings.TrimRight(parsed.Path, "/") + return parsed.String(), nil +} + +func run(ctx context.Context, cfg config) error { + logf(cfg.verbose, "http base: %s", cfg.httpBaseURL) + if err := checkHealth(ctx, cfg.httpBaseURL); err != nil { + return err + } + + sseURL := cfg.httpBaseURL + "/sse" + if err := expectUnauthorized(ctx, sseURL, "", cfg.verbose); err != nil { + return err + } + if err := expectUnauthorized(ctx, sseURL, "invalid-token", cfg.verbose); err != nil { + return err + } + + adminClient, err := newAuthedClient(ctx, sseURL, cfg.adminToken, "snmcp-e2e-admin") + if err != nil { + return err + } + defer func() { + _ = adminClient.Close() + }() + + testClient, err := newAuthedClient(ctx, sseURL, cfg.testUserToken, 
"snmcp-e2e-test-user") + if err != nil { + return err + } + defer func() { + _ = testClient.Close() + }() + + clusters, err := listClusters(ctx, adminClient) + if err != nil { + return err + } + if len(clusters) == 0 { + return errors.New("no clusters returned from pulsar_admin_cluster") + } + cluster := clusters[0] + + suffix := time.Now().UnixNano() + tenant := fmt.Sprintf("e2e-%d", suffix) + namespace := fmt.Sprintf("%s/ns-%d", tenant, suffix) + topic := fmt.Sprintf("persistent://%s/topic-%d", namespace, suffix) + concurrentTopic := fmt.Sprintf("persistent://%s/topic-concurrent-%d", namespace, suffix) + + result, err := callTool(ctx, adminClient, "pulsar_admin_tenant", map[string]any{ + "resource": "tenant", + "operation": "create", + "tenant": tenant, + "adminRoles": []string{"admin"}, + "allowedClusters": []string{cluster}, + }) + if err := requireToolOK(result, err, "pulsar_admin_tenant create"); err != nil { + return err + } + + result, err = callTool(ctx, adminClient, "pulsar_admin_namespace", map[string]any{ + "operation": "create", + "namespace": namespace, + "clusters": []string{cluster}, + }) + if err := requireToolOK(result, err, "pulsar_admin_namespace create"); err != nil { + return err + } + + result, err = callTool(ctx, adminClient, "pulsar_admin_namespace_policy_set", map[string]any{ + "namespace": namespace, + "policy": "permission", + "role": "test-user", + "actions": []string{"consume"}, + }) + if err := requireToolOK(result, err, "pulsar_admin_namespace_policy_set permission"); err != nil { + return err + } + + result, err = callTool(ctx, adminClient, "pulsar_admin_topic", map[string]any{ + "resource": "topic", + "operation": "create", + "topic": topic, + "partitions": float64(0), + }) + if err := requireToolOK(result, err, "pulsar_admin_topic create"); err != nil { + return err + } + + result, err = callTool(ctx, adminClient, "pulsar_client_produce", map[string]any{ + "topic": topic, + "messages": []string{"admin-message"}, + }) + if err := requireToolOK(result, err, "pulsar_client_produce admin"); err != nil { + return err + } + + result, err = callTool(ctx, testClient, "pulsar_client_consume", map[string]any{ + "topic": topic, + "subscription-name": fmt.Sprintf("sub-%d", suffix), + "initial-position": "earliest", + "num-messages": float64(1), + "timeout": float64(15), + "subscription-type": "exclusive", + "subscription-mode": "durable", + "show-properties": false, + "hide-payload": false, + }) + if err := requireToolOK(result, err, "pulsar_client_consume test-user"); err != nil { + return err + } + + result, err = callTool(ctx, testClient, "pulsar_client_produce", map[string]any{ + "topic": topic, + "messages": []string{"unauthorized-message"}, + }) + if err := requireToolError(result, err, "pulsar_client_produce test-user"); err != nil { + return err + } + + result, err = callTool(ctx, adminClient, "pulsar_admin_topic", map[string]any{ + "resource": "topic", + "operation": "create", + "topic": concurrentTopic, + "partitions": float64(0), + }) + if err := requireToolOK(result, err, "pulsar_admin_topic create concurrent"); err != nil { + return err + } + + if err := runConcurrent(ctx, adminClient, testClient, concurrentTopic, fmt.Sprintf("sub-concurrent-%d", suffix)); err != nil { + return err + } + + return nil +} + +func checkHealth(ctx context.Context, httpBaseURL string) error { + healthURL := httpBaseURL + "/healthz" + readyURL := httpBaseURL + "/readyz" + + if err := expectStatusOK(ctx, healthURL); err != nil { + return fmt.Errorf("healthz check failed: %w", 
err) + } + if err := expectStatusOK(ctx, readyURL); err != nil { + return fmt.Errorf("readyz check failed: %w", err) + } + return nil +} + +func expectStatusOK(ctx context.Context, target string) error { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target, nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer func() { + _ = resp.Body.Close() + }() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + return nil +} + +func expectUnauthorized(ctx context.Context, sseURL, token string, verbose bool) error { + logf(verbose, "checking unauthorized: url=%s token_present=%t", sseURL, token != "") + status, err := probeSSEStatus(ctx, sseURL, token) + if err != nil { + logf(verbose, "probe SSE failed: %v", err) + return err + } + logf(verbose, "probe SSE status: %d", status) + if status == http.StatusUnauthorized || status == http.StatusForbidden { + return nil + } + if token == "" { + return fmt.Errorf("expected unauthorized status for %s, got %d", sseURL, status) + } + + headers := map[string]string{ + "Authorization": "Bearer " + token, + } + c, err := client.NewSSEMCPClient(sseURL, client.WithHeaders(headers)) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Start(ctx); err != nil { + logf(verbose, "sse start error: %v", err) + if isAuthError(err) { + return nil + } + return fmt.Errorf("expected auth error for %s, got %v", sseURL, err) + } + + if err := initializeClient(ctx, c, "snmcp-e2e-unauthorized"); err != nil { + logf(verbose, "initialize error: %v", err) + if isAuthError(err) { + return nil + } + return fmt.Errorf("expected auth error during initialize for %s, got %v", sseURL, err) + } + + result, err := callTool(ctx, c, "pulsar_admin_cluster", map[string]any{ + "resource": "cluster", + "operation": "list", + }) + if err != nil { + logf(verbose, "tool call error: %v", err) + if isAuthError(err) { + return nil + } + return fmt.Errorf("expected auth error for %s, got %v", sseURL, err) + } + if result == nil || !result.IsError { + logf(verbose, "tool call result: %#v", result) + return fmt.Errorf("expected auth error for %s", sseURL) + } + if !isAuthText(firstText(result)) { + logf(verbose, "tool call error text: %s", firstText(result)) + return fmt.Errorf("expected auth error for %s, got %s", sseURL, firstText(result)) + } + return nil +} + +func newAuthedClient(ctx context.Context, sseURL, token, clientName string) (*client.Client, error) { + headers := map[string]string{ + "Authorization": "Bearer " + token, + } + c, err := client.NewSSEMCPClient(sseURL, client.WithHeaders(headers)) + if err != nil { + return nil, err + } + if err := c.Start(ctx); err != nil { + return nil, err + } + if err := initializeClient(ctx, c, clientName); err != nil { + return nil, err + } + return c, nil +} + +func initializeClient(ctx context.Context, c *client.Client, name string) error { + req := mcp.InitializeRequest{} + req.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION + req.Params.ClientInfo = mcp.Implementation{ + Name: name, + Version: "1.0.0", + } + _, err := c.Initialize(ctx, req) + return err +} + +func callTool(ctx context.Context, c *client.Client, name string, args map[string]any) (*mcp.CallToolResult, error) { + request := mcp.CallToolRequest{} + request.Params.Name = name + request.Params.Arguments = args + return c.CallTool(ctx, request) +} + +func requireToolOK(result *mcp.CallToolResult, err error, 
label string) error { + if err != nil { + return fmt.Errorf("%s failed: %w", label, err) + } + if result.IsError { + return fmt.Errorf("%s returned error: %s", label, firstText(result)) + } + return nil +} + +func requireToolError(result *mcp.CallToolResult, err error, label string) error { + if err != nil { + return fmt.Errorf("%s failed: %w", label, err) + } + if !result.IsError { + return fmt.Errorf("%s expected error, got success: %s", label, firstText(result)) + } + return nil +} + +func firstText(result *mcp.CallToolResult) string { + if result == nil || len(result.Content) == 0 { + return "" + } + if text, ok := result.Content[0].(mcp.TextContent); ok { + return text.Text + } + return "" +} + +func probeSSEStatus(ctx context.Context, sseURL, token string) (int, error) { + reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, sseURL, nil) + if err != nil { + return 0, err + } + req.Header.Set("Accept", "text/event-stream") + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + + httpClient := &http.Client{Timeout: 5 * time.Second} + resp, err := httpClient.Do(req) + if err != nil { + return 0, err + } + defer func() { + _ = resp.Body.Close() + }() + return resp.StatusCode, nil +} + +func isAuthError(err error) bool { + if err == nil { + return false + } + return isAuthText(err.Error()) +} + +func isAuthText(text string) bool { + if text == "" { + return false + } + lower := strings.ToLower(text) + if strings.Contains(lower, "status code: 401") || strings.Contains(lower, "status code: 403") { + return true + } + if strings.Contains(lower, " 401") || strings.Contains(lower, " 403") { + return true + } + if strings.Contains(lower, "unauthorized") { + return true + } + if strings.Contains(lower, "authentication") || strings.Contains(lower, "authorization") { + return true + } + if strings.Contains(lower, "permission") || strings.Contains(lower, "access denied") { + return true + } + if strings.Contains(lower, "missing authorization") || strings.Contains(lower, "session not found") { + return true + } + return false +} + +func listClusters(ctx context.Context, c *client.Client) ([]string, error) { + result, err := callTool(ctx, c, "pulsar_admin_cluster", map[string]any{ + "resource": "cluster", + "operation": "list", + }) + if err := requireToolOK(result, err, "pulsar_admin_cluster list"); err != nil { + return nil, err + } + raw := firstText(result) + if raw == "" { + return nil, errors.New("empty cluster list result") + } + var clusters []string + if err := json.Unmarshal([]byte(raw), &clusters); err != nil { + return nil, fmt.Errorf("failed to parse cluster list: %w", err) + } + return clusters, nil +} + +func runConcurrent(ctx context.Context, adminClient, testClient *client.Client, topic, subscription string) error { + var wg sync.WaitGroup + errCh := make(chan error, 2) + + wg.Add(1) + go func() { + defer wg.Done() + result, err := callTool(ctx, adminClient, "pulsar_client_produce", map[string]any{ + "topic": topic, + "messages": []string{"concurrent-message"}, + }) + err = requireToolOK(result, err, "pulsar_client_produce concurrent admin") + if err != nil { + errCh <- err + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + result, err := callTool(ctx, testClient, "pulsar_client_consume", map[string]any{ + "topic": topic, + "subscription-name": subscription, + "initial-position": "earliest", + "num-messages": float64(1), + "timeout": float64(15), + }) + err = 
requireToolOK(result, err, "pulsar_client_consume concurrent test-user") + if err != nil { + errCh <- err + } + }() + + wg.Wait() + close(errCh) + + for err := range errCh { + if err != nil { + return err + } + } + return nil +} + +func getenv(key, fallback string) string { + val := os.Getenv(key) + if val == "" { + return fallback + } + return val +} + +func getenvBool(key string, fallback bool) bool { + value, ok := os.LookupEnv(key) + if !ok { + return fallback + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "1", "true", "yes", "y", "on": + return true + case "0", "false", "no", "n", "off": + return false + default: + return fallback + } +} + +func logf(enabled bool, format string, args ...any) { + if !enabled { + return + } + _, _ = fmt.Fprintf(os.Stderr, "[e2e] "+format+"\n", args...) +} diff --git a/scripts/e2e-test.sh b/scripts/e2e-test.sh index 74131c7..f008c28 100755 --- a/scripts/e2e-test.sh +++ b/scripts/e2e-test.sh @@ -5,8 +5,9 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" E2E_DIR="${ROOT_DIR}/charts/snmcp/e2e" PULSAR_CONTAINER="${PULSAR_CONTAINER:-pulsar-standalone}" -PULSAR_IMAGE="${PULSAR_IMAGE:-snstage/pulsar-all:4.1.0.10}" +PULSAR_IMAGE="${PULSAR_IMAGE:-apachepulsar/pulsar-all:4.1.0}" KIND_NETWORK="${KIND_NETWORK:-kind}" +KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}" PULSAR_WEB_PORT="${PULSAR_WEB_PORT:-8080}" PULSAR_BROKER_PORT="${PULSAR_BROKER_PORT:-6650}" PULSAR_STARTUP_TIMEOUT="${PULSAR_STARTUP_TIMEOUT:-180}" @@ -15,10 +16,16 @@ PULSAR_STARTUP_INTERVAL="${PULSAR_STARTUP_INTERVAL:-3}" SNMCP_RELEASE="${SNMCP_RELEASE:-snmcp}" SNMCP_NAMESPACE="${SNMCP_NAMESPACE:-default}" SNMCP_CHART_DIR="${SNMCP_CHART_DIR:-${ROOT_DIR}/charts/snmcp}" -SNMCP_FEATURES="${SNMCP_FEATURES:-pulsar-admin,pulsar-client}" +SNMCP_FEATURES="" SNMCP_IMAGE_REPO="${SNMCP_IMAGE_REPO:-}" SNMCP_IMAGE_TAG="${SNMCP_IMAGE_TAG:-}" SNMCP_WAIT_TIMEOUT="${SNMCP_WAIT_TIMEOUT:-180s}" +SNMCP_HTTP_PATH="${SNMCP_HTTP_PATH:-/mcp}" +SNMCP_SERVICE_PORT="${SNMCP_SERVICE_PORT:-9090}" +SNMCP_LOCAL_PORT="${SNMCP_LOCAL_PORT:-19090}" +SNMCP_PORT_FORWARD_TIMEOUT="${SNMCP_PORT_FORWARD_TIMEOUT:-60}" +SNMCP_E2E_BIN="${SNMCP_E2E_BIN:-${ROOT_DIR}/bin/snmcp-e2e}" +SNMCP_PORT_FORWARD_PID="" TOKEN_ENV_FILE="${TOKEN_ENV_FILE:-${E2E_DIR}/test-tokens.env}" TOKEN_SECRET_FILE="${TOKEN_SECRET_FILE:-${E2E_DIR}/test-secret.key}" @@ -53,6 +60,19 @@ pulsar_ip() { docker inspect -f "{{.NetworkSettings.Networks.${KIND_NETWORK}.IPAddress}}" "$PULSAR_CONTAINER" } +wait_for_http() { + local url="$1" + local timeout="$2" + local deadline=$((SECONDS + timeout)) + while ((SECONDS < deadline)); do + if curl -fsS "$url" >/dev/null; then + return 0 + fi + sleep 2 + done + return 1 +} + setup_pulsar() { require_cmd docker require_cmd curl @@ -78,7 +98,7 @@ setup_pulsar() { -e PULSAR_PREFIX_brokerClientAuthenticationParameters="token:${ADMIN_TOKEN}" \ -v "$TOKEN_SECRET_FILE:/pulsarctl/test/auth/token/secret.key:ro" \ "$PULSAR_IMAGE" \ - bash -lc 'set -- $(hostname -i); export PULSAR_PREFIX_advertisedAddress=$1; exec bin/pulsar standalone' \ + bash -lc 'set -- $(hostname -i); export PULSAR_PREFIX_advertisedAddress=$1; bin/apply-config-from-env.py /pulsar/conf/standalone.conf; exec bin/pulsar standalone' \ >/dev/null log "waiting for pulsar to be ready" @@ -98,6 +118,21 @@ setup_pulsar() { die "pulsar not ready after ${PULSAR_STARTUP_TIMEOUT}s" } +build_image() { + require_cmd docker + require_cmd kind + + SNMCP_IMAGE_REPO="${SNMCP_IMAGE_REPO:-snmcp-e2e}" + SNMCP_IMAGE_TAG="${SNMCP_IMAGE_TAG:-local}" + + local 
image_ref="${SNMCP_IMAGE_REPO}:${SNMCP_IMAGE_TAG}" + log "building image ${image_ref}" + docker build -t "$image_ref" -f "${ROOT_DIR}/Dockerfile" "$ROOT_DIR" >/dev/null + + log "loading image into kind" + kind load docker-image "$image_ref" --name "$KIND_CLUSTER_NAME" >/dev/null +} + deploy_mcp() { require_cmd helm require_cmd kubectl @@ -113,7 +148,6 @@ deploy_mcp() { --create-namespace --set "pulsar.webServiceURL=http://${ip}:${PULSAR_WEB_PORT}" --set "pulsar.serviceURL=pulsar://${ip}:${PULSAR_BROKER_PORT}" - --set "server.features={${SNMCP_FEATURES}}" --wait --timeout "$SNMCP_WAIT_TIMEOUT" ) @@ -137,6 +171,32 @@ deploy_mcp() { log "snmcp deployed and ready" } +run_tests() { + require_cmd kubectl + require_cmd go + require_cmd curl + load_tokens + + log "building snmcp-e2e binary" + go build -o "$SNMCP_E2E_BIN" "${ROOT_DIR}/cmd/snmcp-e2e" >/dev/null + + log "starting port-forward for snmcp service" + kubectl port-forward "svc/${SNMCP_RELEASE}" "${SNMCP_LOCAL_PORT}:${SNMCP_SERVICE_PORT}" \ + --namespace "$SNMCP_NAMESPACE" >/dev/null 2>&1 & + SNMCP_PORT_FORWARD_PID=$! + + trap 'if [[ -n "${SNMCP_PORT_FORWARD_PID:-}" ]]; then kill "$SNMCP_PORT_FORWARD_PID" >/dev/null 2>&1 || true; fi' RETURN + + local health_url="http://127.0.0.1:${SNMCP_LOCAL_PORT}${SNMCP_HTTP_PATH}/healthz" + if ! wait_for_http "$health_url" "$SNMCP_PORT_FORWARD_TIMEOUT"; then + die "port-forward did not become ready within ${SNMCP_PORT_FORWARD_TIMEOUT}s" + fi + + local http_base="http://127.0.0.1:${SNMCP_LOCAL_PORT}${SNMCP_HTTP_PATH}" + log "running snmcp-e2e against ${http_base}" + E2E_HTTP_BASE="$http_base" "$SNMCP_E2E_BIN" +} + cleanup() { require_cmd docker @@ -155,9 +215,11 @@ Usage: $0 Commands: setup-pulsar Start Pulsar standalone with JWT auth on the kind network + build-image Build snmcp image and load into kind deploy-mcp Deploy snmcp Helm chart and wait for readiness + run-tests Port-forward snmcp service and run E2E client cleanup Remove Pulsar container and uninstall snmcp release - all Run setup-pulsar then deploy-mcp + all Run setup-pulsar, build-image, deploy-mcp, and run-tests USAGE } @@ -167,15 +229,23 @@ main() { setup-pulsar) setup_pulsar ;; + build-image) + build_image + ;; deploy-mcp) deploy_mcp ;; + run-tests) + run_tests + ;; cleanup) cleanup ;; all) setup_pulsar + build_image deploy_mcp + run_tests ;; *) usage From 3cd837f65171579781c130c8f9006859355b9038 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 23:18:36 +0800 Subject: [PATCH 05/13] docs(claude): update CLAUDE.md with build commands and architecture details --- CLAUDE.md | 79 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 730afd8..6f37d9b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,13 +6,15 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ```bash make build # Build server binary to bin/snmcp +make docker-build # Build local Docker image (both streamnative/mcp-server and streamnative/snmcp tags) +make docker-build-push # Build and push multi-platform image (linux/amd64,linux/arm64) +make docker-build-multiplatform # Build multi-platform image locally +make docker-buildx-setup # Setup Docker buildx for multi-platform builds +make license-check # Check license headers +make license-fix # Fix license headers go test -race ./... # Run all tests with race detection go test -race ./pkg/mcp/builders/... # Run specific package tests go test -v -run TestName ./pkg/... 
# Run a single test -make license-check # Check license headers -make license-fix # Fix license headers -make docker-build # Build local Docker image -make docker-build-push # Build and push multi-platform image ``` ## Architecture Overview @@ -24,6 +26,8 @@ The StreamNative MCP Server implements the Model Context Protocol using the `mar ``` Client Request β†’ MCP Server (pkg/mcp/server.go) ↓ + SSE/stdio transport layer (pkg/cmd/mcp/) + ↓ Tool Handler (from builders) ↓ Context Functions (pkg/mcp/ctx.go) @@ -47,7 +51,7 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) 3. **Tool Builders Organization** - `builders/kafka/` - Kafka-specific tool builders (connect, consume, groups, partitions, produce, schema_registry, topics) - - `builders/pulsar/` - Pulsar-specific tool builders (brokers, cluster, functions, namespace, schema, sinks, sources, subscription, tenant, topic, etc.) + - `builders/pulsar/` - Pulsar-specific tool builders (brokers, brokers_stats, cluster, functions, functions_worker, namespace, namespace_policy, nsisolationpolicy, packages, resourcequotas, schema, sinks, sources, subscription, tenant, topic, topic_policy) - `builders/streamnative/` - StreamNative Cloud tool builders 4. **Tool Registration** (`pkg/mcp/*_tools.go`) @@ -64,6 +68,10 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) 6. **Session Management** (`pkg/mcp/session/`) - `pulsar_session_manager.go` - LRU session cache with TTL cleanup for multi-session mode +7. **Transport Layer** (`pkg/cmd/mcp/`) + - `sse.go` - SSE transport with health endpoints (`/healthz`, `/readyz`) and auth middleware + - `server.go` - Stdio transport and common server initialization + ### Key Design Patterns - **Builder Pattern**: Tool builders create tools based on features and read-only mode @@ -71,6 +79,7 @@ Client Request β†’ MCP Server (pkg/mcp/server.go) - **Context Injection**: Sessions passed via `context.Context` using typed keys - **Feature Flags**: Tools enabled/disabled via string feature identifiers - **Circuit Breaker**: PFTools uses failure thresholds to prevent cascading failures +- **Multi-Session Pattern**: Per-user Pulsar sessions with LRU caching for SSE mode ## Adding New Tools @@ -157,10 +166,16 @@ When `--multi-session-pulsar` is enabled (SSE server with external Pulsar only): - **Session management**: See `pkg/mcp/session/pulsar_session_manager.go` Key files: -- `pkg/cmd/mcp/sse.go` - Auth middleware wraps SSEHandler()/MessageHandler() +- `pkg/cmd/mcp/sse.go` - Auth middleware wraps SSEHandler()/MessageHandler(), health endpoints - `pkg/mcp/session/pulsar_session_manager.go` - LRU session cache with TTL cleanup - `pkg/cmd/mcp/server.go` - Skips global PulsarSession when multi-session enabled +### Health Endpoints + +SSE server exposes health check endpoints: +- `GET /mcp/healthz` - Liveness probe (always returns "ok") +- `GET /mcp/readyz` - Readiness probe (always returns "ready") + ## Feature Flags Available feature flags (defined in `pkg/mcp/features.go`): @@ -171,18 +186,62 @@ Available feature flags (defined in `pkg/mcp/features.go`): | `all-kafka` | All Kafka features | | `all-pulsar` | All Pulsar features | | `kafka-client` | Kafka produce/consume | -| `kafka-admin` | Kafka admin operations | +| `kafka-admin` | Kafka admin operations (all admin tools) | | `kafka-admin-schema-registry` | Schema Registry | | `kafka-admin-kafka-connect` | Kafka Connect | -| `pulsar-admin` | Pulsar admin operations | +| `kafka-admin-topics` | Manage Kafka topics | +| `kafka-admin-partitions` | Manage Kafka partitions | +| 
`kafka-admin-groups` | Manage Kafka consumer groups | +| `pulsar-admin` | Pulsar admin operations (all admin tools) | | `pulsar-client` | Pulsar produce/consume | -| `pulsar-admin-*` | Various Pulsar admin features (brokers, clusters, functions, namespaces, etc.) | +| `pulsar-admin-brokers` | Manage Pulsar brokers | +| `pulsar-admin-brokers-status` | Pulsar broker status | +| `pulsar-admin-broker-stats` | Access Pulsar broker statistics | +| `pulsar-admin-clusters` | Manage Pulsar clusters | +| `pulsar-admin-functions` | Manage Pulsar Functions | +| `pulsar-admin-functions-worker` | Manage Pulsar Function workers | +| `pulsar-admin-namespaces` | Manage Pulsar namespaces | +| `pulsar-admin-namespace-policy` | Configure namespace policies | +| `pulsar-admin-ns-isolation-policy` | Manage namespace isolation policies | +| `pulsar-admin-packages` | Manage Pulsar packages | +| `pulsar-admin-resource-quotas` | Configure resource quotas | +| `pulsar-admin-schemas` | Manage Pulsar schemas | +| `pulsar-admin-subscriptions` | Manage Pulsar subscriptions | +| `pulsar-admin-tenants` | Manage Pulsar tenants | +| `pulsar-admin-topics` | Manage Pulsar topics | +| `pulsar-admin-sinks` | Manage Pulsar IO sinks | +| `pulsar-admin-sources` | Manage Pulsar Sources | +| `pulsar-admin-topic-policy` | Configure topic policies | | `streamnative-cloud` | StreamNative Cloud context management | | `functions-as-tools` | Dynamic Pulsar Functions as MCP tools | +## Helm Chart + +The project includes a Helm chart for Kubernetes deployment at `charts/snmcp/`: + +```bash +# Basic installation +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=http://pulsar.example.com:8080 + +# With TLS +helm install snmcp ./charts/snmcp \ + --set pulsar.webServiceURL=https://pulsar:8443 \ + --set pulsar.tls.enabled=true \ + --set pulsar.tls.secretName=pulsar-tls +``` + +The chart runs MCP Server in Multi-Session Pulsar mode with authentication via `Authorization: Bearer ` header. 
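+
+To sanity-check a deployment, you can port-forward the service and probe the endpoints directly (a minimal sketch assuming the defaults used by `scripts/e2e-test.sh`: release name `snmcp`, service port `9090`, HTTP base path `/mcp`; `$PULSAR_JWT` stands in for whatever token your Pulsar deployment accepts — adjust all of these to your values):
+
+```bash
+# Forward the chart's service port to localhost
+kubectl port-forward svc/snmcp 9090:9090
+
+# Liveness and readiness probes are unauthenticated
+curl http://127.0.0.1:9090/mcp/healthz
+curl http://127.0.0.1:9090/mcp/readyz
+
+# The SSE endpoint expects a Pulsar JWT via the Authorization header
+curl -N \
+  -H "Authorization: Bearer $PULSAR_JWT" \
+  -H "Accept: text/event-stream" \
+  http://127.0.0.1:9090/mcp/sse
+```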
+ +## SDK Packages + +The project includes generated SDK packages: +- `sdk/sdk-apiserver/` - StreamNative Cloud API server client +- `sdk/sdk-kafkaconnect/` - Kafka Connect client + ## Error Handling - Wrap errors: `fmt.Errorf("failed to X: %w", err)` - Return tool errors: `mcp.NewToolResultError("message")` - Check session nil before operations -- For PFTools, use circuit breaker to handle repeated failures \ No newline at end of file +- For PFTools, use circuit breaker to handle repeated failures From ebef8620106b47e5a44ea0c89a64a0c279f08b9d Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Thu, 8 Jan 2026 23:36:46 +0800 Subject: [PATCH 06/13] refactor(auth): migrate from dgrijalva/jwt-go to golang-jwt/jwt --- go.mod | 2 +- go.sum | 3 +- pkg/auth/auth.go | 10 ++- pkg/auth/authorization_tokenretriever.go | 7 +- pkg/auth/cache/cache.go | 1 + pkg/auth/client_credentials_flow.go | 2 + pkg/auth/client_credentials_provider.go | 10 ++- pkg/auth/oidc_endpoint_provider.go | 2 +- pkg/auth/store/keyring.go | 7 ++ pkg/cmd/mcp/server.go | 2 +- pkg/cmd/mcp/stdio.go | 2 +- pkg/config/auth.go | 6 +- pkg/config/config.go | 10 ++- pkg/config/options.go | 74 ++++++++++----------- pkg/kafka/connection.go | 4 +- pkg/kafka/kafkaconnect.go | 14 ++-- pkg/log/io.go | 1 + pkg/mcp/builders/pulsar/schema.go | 3 +- pkg/mcp/builders/registry_test.go | 54 +++++---------- pkg/mcp/pftools/manager.go | 2 +- pkg/mcp/prompts.go | 6 +- pkg/mcp/sncontext_utils.go | 12 ++-- pkg/mcp/streamnative_resources_log_tools.go | 2 +- pkg/mcp/streamnative_resources_tools.go | 12 ++-- pkg/pulsar/connection.go | 3 +- pkg/schema/avro_core.go | 2 +- pkg/schema/avro_test.go | 5 +- 27 files changed, 140 insertions(+), 118 deletions(-) diff --git a/go.mod b/go.mod index 606d990..3d63a7b 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.24.4 require ( github.com/99designs/keyring v1.2.2 github.com/apache/pulsar-client-go v0.13.1 - github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/golang-jwt/jwt v3.2.1+incompatible github.com/google/go-cmp v0.7.0 github.com/hamba/avro/v2 v2.28.0 github.com/mark3labs/mcp-go v0.43.2 diff --git a/go.sum b/go.sum index 8426de5..1156150 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -85,6 +83,7 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible 
h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 0938d5a..737b907 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides authentication and authorization functionality for StreamNative MCP Server. +// It implements OAuth 2.0 flows including client credentials and device authorization grants. package auth import ( @@ -20,12 +22,13 @@ import ( "io" "time" - "github.com/dgrijalva/jwt-go" + "github.com/golang-jwt/jwt" "golang.org/x/oauth2" "k8s.io/utils/clock" ) const ( + // ClaimNameUserName is the JWT claim name for the username. ClaimNameUserName = "https://streamnative.io/username" ) @@ -139,17 +142,18 @@ func ExtractUserName(token oauth2.Token) (string, error) { return "", fmt.Errorf("access token doesn't contain a recognizable user claim") } +// DumpToken outputs token information to the provided writer for debugging. func DumpToken(out io.Writer, token oauth2.Token) { p := jwt.Parser{} claims := jwt.MapClaims{} if _, _, err := p.ParseUnverified(token.AccessToken, claims); err != nil { - fmt.Fprintf(out, "Unable to parse token. Err: %v\n", err) + _, _ = fmt.Fprintf(out, "Unable to parse token. Err: %v\n", err) return } text, err := json.MarshalIndent(claims, "", " ") if err != nil { - fmt.Fprintf(out, "Unable to print token. Err: %v\n", err) + _, _ = fmt.Fprintf(out, "Unable to print token. Err: %v\n", err) } _, _ = out.Write(text) _, _ = fmt.Fprintln(out, "") diff --git a/pkg/auth/authorization_tokenretriever.go b/pkg/auth/authorization_tokenretriever.go index 99c2353..730e9ff 100644 --- a/pkg/auth/authorization_tokenretriever.go +++ b/pkg/auth/authorization_tokenretriever.go @@ -82,6 +82,7 @@ type TokenErrorResponse struct { ErrorDescription string `json:"error_description"` } +// TokenError represents an error response from the token endpoint. 
type TokenError struct { ErrorCode string ErrorDescription string @@ -222,6 +223,7 @@ func (ce *TokenRetriever) ExchangeCode(req AuthorizationCodeExchangeRequest) (*T if err != nil { return nil, err } + defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } @@ -230,7 +232,7 @@ func (ce *TokenRetriever) ExchangeCode(req AuthorizationCodeExchangeRequest) (*T // auth tokens for errors and parsing the raw body to a TokenResult struct func (ce *TokenRetriever) handleAuthTokensResponse(resp *http.Response) (*TokenResult, error) { if resp.Body != nil { - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() } if resp.StatusCode < 200 || resp.StatusCode > 299 { @@ -272,6 +274,7 @@ func (ce *TokenRetriever) ExchangeDeviceCode(ctx context.Context, req DeviceCode if err != nil { return nil, err } + defer func() { _ = response.Body.Close() }() token, err := ce.handleAuthTokensResponse(response) if err == nil { return token, nil @@ -314,6 +317,7 @@ func (ce *TokenRetriever) ExchangeRefreshToken(req RefreshTokenExchangeRequest) if err != nil { return nil, err } + defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } @@ -330,6 +334,7 @@ func (ce *TokenRetriever) ExchangeClientCredentials(req ClientCredentialsExchang if err != nil { return nil, err } + defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } diff --git a/pkg/auth/cache/cache.go b/pkg/auth/cache/cache.go index 70bc133..5a2b8ce 100644 --- a/pkg/auth/cache/cache.go +++ b/pkg/auth/cache/cache.go @@ -51,6 +51,7 @@ type tokenCache struct { token *oauth2.Token } +// NewDefaultTokenCache creates a default token cache with the given store and refresher. func NewDefaultTokenCache(store store.Store, audience string, refresher auth.AuthorizationGrantRefresher) (CachingTokenSource, error) { cache := &tokenCache{ diff --git a/pkg/auth/client_credentials_flow.go b/pkg/auth/client_credentials_flow.go index 7de8bd4..2fa7298 100644 --- a/pkg/auth/client_credentials_flow.go +++ b/pkg/auth/client_credentials_flow.go @@ -41,6 +41,7 @@ type ClientCredentialsExchanger interface { ExchangeClientCredentials(req ClientCredentialsExchangeRequest) (*TokenResult, error) } +// NewClientCredentialsFlow creates a new client credentials flow with the given components. func NewClientCredentialsFlow( issuerData Issuer, provider ClientCredentialsProvider, @@ -127,6 +128,7 @@ type ClientCredentialsGrantRefresher struct { clock clock.Clock } +// NewDefaultClientCredentialsGrantRefresher creates a default client credentials grant refresher. func NewDefaultClientCredentialsGrantRefresher(issuerData Issuer, clock clock.Clock) (*ClientCredentialsGrantRefresher, error) { wellKnownEndpoints, err := GetOIDCWellKnownEndpointsFromIssuerURL(issuerData.IssuerEndpoint) diff --git a/pkg/auth/client_credentials_provider.go b/pkg/auth/client_credentials_provider.go index 2ab652d..47e84f2 100644 --- a/pkg/auth/client_credentials_provider.go +++ b/pkg/auth/client_credentials_provider.go @@ -18,19 +18,23 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "strings" ) const ( + // KeyFileTypeServiceAccount identifies service account key files. KeyFileTypeServiceAccount = "sn_service_account" FILE = "file://" DATA = "data://" ) +// KeyFileProvider provides client credentials from a key file path. type KeyFileProvider struct { KeyFile string } +// KeyFile holds service account credentials from a JSON key file. 
type KeyFile struct { Type string `json:"type"` ClientID string `json:"client_id"` @@ -38,6 +42,7 @@ type KeyFile struct { ClientEmail string `json:"client_email"` } +// NewClientCredentialsProviderFromKeyFile creates a provider from a key file path. func NewClientCredentialsProviderFromKeyFile(keyFile string) *KeyFileProvider { return &KeyFileProvider{ KeyFile: keyFile, @@ -52,7 +57,7 @@ func (k *KeyFileProvider) GetClientCredentials() (*KeyFile, error) { switch { case strings.HasPrefix(k.KeyFile, FILE): filename := strings.TrimPrefix(k.KeyFile, FILE) - keyFile, err = os.ReadFile(filename) + keyFile, err = os.ReadFile(filepath.Clean(filename)) case strings.HasPrefix(k.KeyFile, DATA): keyFile = []byte(strings.TrimPrefix(k.KeyFile, DATA)) case strings.HasPrefix(k.KeyFile, "data:"): @@ -80,10 +85,12 @@ func (k *KeyFileProvider) GetClientCredentials() (*KeyFile, error) { return &v, nil } +// KeyFileStructProvider provides client credentials from an in-memory KeyFile struct. type KeyFileStructProvider struct { KeyFile *KeyFile } +// GetClientCredentials returns the client credentials from the in-memory KeyFile. func (k *KeyFileStructProvider) GetClientCredentials() (*KeyFile, error) { if k.KeyFile == nil { return nil, fmt.Errorf("key file is nil") @@ -91,6 +98,7 @@ func (k *KeyFileStructProvider) GetClientCredentials() (*KeyFile, error) { return k.KeyFile, nil } +// NewClientCredentialsProviderFromKeyFileStruct creates a provider from an in-memory KeyFile. func NewClientCredentialsProviderFromKeyFileStruct(keyFile *KeyFile) *KeyFileStructProvider { return &KeyFileStructProvider{ KeyFile: keyFile, diff --git a/pkg/auth/oidc_endpoint_provider.go b/pkg/auth/oidc_endpoint_provider.go index 69c9ef1..62ecf1d 100644 --- a/pkg/auth/oidc_endpoint_provider.go +++ b/pkg/auth/oidc_endpoint_provider.go @@ -43,7 +43,7 @@ func GetOIDCWellKnownEndpointsFromIssuerURL(issuerURL string) (*OIDCWellKnownEnd if err != nil { return nil, errors.Wrapf(err, "could not get well known endpoints from url %s", u.String()) } - defer r.Body.Close() + defer func() { _ = r.Body.Close() }() var wkEndpoints OIDCWellKnownEndpoints err = json.NewDecoder(r.Body).Decode(&wkEndpoints) diff --git a/pkg/auth/store/keyring.go b/pkg/auth/store/keyring.go index ae09e2d..f2c47f2 100644 --- a/pkg/auth/store/keyring.go +++ b/pkg/auth/store/keyring.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package store provides token storage implementations for authentication credentials. +// It includes a KeyringStore implementation that uses the system keyring for secure storage. package store import ( @@ -25,6 +27,7 @@ import ( "k8s.io/utils/clock" ) +// KeyringStore provides secure token storage using the system keyring. type KeyringStore struct { kr keyring.Keyring clock clock.Clock @@ -48,6 +51,7 @@ func NewKeyringStore(kr keyring.Keyring) (*KeyringStore, error) { var _ Store = &KeyringStore{} +// SaveGrant saves an authorization grant to the keyring. func (f *KeyringStore) SaveGrant(audience string, grant auth.AuthorizationGrant) error { f.lock.Lock() defer f.lock.Unlock() @@ -75,6 +79,7 @@ func (f *KeyringStore) SaveGrant(audience string, grant auth.AuthorizationGrant) return nil } +// LoadGrant loads an authorization grant from the keyring. 
func (f *KeyringStore) LoadGrant(audience string) (*auth.AuthorizationGrant, error) { f.lock.Lock() defer f.lock.Unlock() @@ -97,6 +102,7 @@ func (f *KeyringStore) LoadGrant(audience string) (*auth.AuthorizationGrant, err return &item.Grant, nil } +// WhoAmI returns the username associated with the grant for the given audience. func (f *KeyringStore) WhoAmI(audience string) (string, error) { f.lock.Lock() defer f.lock.Unlock() @@ -132,6 +138,7 @@ func (f *KeyringStore) WhoAmI(audience string) (string, error) { return label, err } +// Logout removes all stored grants from the keyring. func (f *KeyringStore) Logout() error { f.lock.Lock() defer f.lock.Unlock() diff --git a/pkg/cmd/mcp/server.go b/pkg/cmd/mcp/server.go index 5de74c8..3dd5aa1 100644 --- a/pkg/cmd/mcp/server.go +++ b/pkg/cmd/mcp/server.go @@ -29,7 +29,7 @@ import ( ) func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *logrus.Logger) (*mcp.Server, error) { - snConfig := configOpts.Options.LoadConfigOrDie() + snConfig := configOpts.LoadConfigOrDie() var s *server.MCPServer var mcpServer *mcp.Server switch { diff --git a/pkg/cmd/mcp/stdio.go b/pkg/cmd/mcp/stdio.go index b06ecb6..8bf7dca 100644 --- a/pkg/cmd/mcp/stdio.go +++ b/pkg/cmd/mcp/stdio.go @@ -110,7 +110,7 @@ func initLogger(filePath string) (*logrus.Logger, error) { return logrus.New(), nil } - fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) if err != nil { return nil, fmt.Errorf("failed to open log file: %w", err) } diff --git a/pkg/config/auth.go b/pkg/config/auth.go index e605a50..e7780bf 100644 --- a/pkg/config/auth.go +++ b/pkg/config/auth.go @@ -24,10 +24,13 @@ import ( ) const ( - ServiceName = "StreamNativeMCP" + // ServiceName is the name used for keyring service. + ServiceName = "StreamNativeMCP" + // KeychainName is the name of the macOS keychain. KeychainName = "snmcp" ) +// AuthOptions provides configuration options for authentication. type AuthOptions struct { BackendOverride string storage Storage @@ -37,6 +40,7 @@ type AuthOptions struct { store.Store } +// NewDefaultAuthOptions creates a new AuthOptions with default values. func NewDefaultAuthOptions() AuthOptions { return AuthOptions{} } diff --git a/pkg/config/config.go b/pkg/config/config.go index 2084e0f..89e0561 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -21,6 +21,7 @@ import ( "github.com/streamnative/streamnative-mcp-server/pkg/auth" ) +// SnConfig holds the StreamNative MCP Server configuration. type SnConfig struct { // the API server endpoint Server string `yaml:"server"` @@ -40,6 +41,7 @@ type SnConfig struct { ExternalPulsar *ExternalPulsar `yaml:"external-pulsar"` } +// Auth holds authentication configuration for the StreamNative API. 
type Auth struct { // the OAuth 2.0 issuer endpoint IssuerEndpoint string `yaml:"issuer"` @@ -50,10 +52,10 @@ type Auth struct { } func (a *Auth) Validate() error { - if !(isValidIssuer(a.IssuerEndpoint) && isValidClientID(a.ClientID) && isValidAudience(a.Audience)) { - return errors.New("configuration error: auth section is incomplete or invalid") + if isValidIssuer(a.IssuerEndpoint) && isValidClientID(a.ClientID) && isValidAudience(a.Audience) { + return nil } - return nil + return errors.New("configuration error: auth section is incomplete or invalid") } func isValidIssuer(iss string) bool { @@ -77,12 +79,14 @@ func (a *Auth) Issuer() auth.Issuer { } } +// Context holds the default context for cluster connections. type Context struct { Organization string `yaml:"organization,omitempty"` PulsarInstance string `yaml:"pulsar-instance,omitempty"` PulsarCluster string `yaml:"pulsar-cluster,omitempty"` } +// Storage defines the interface for persisting configuration and credentials. type Storage interface { // Gets the config directory for configuration files, credentials and caches GetConfigDirectory() string diff --git a/pkg/config/options.go b/pkg/config/options.go index bd5cbcc..5078dc3 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -162,42 +162,42 @@ func (o *Options) AddFlags(cmd *cobra.Command) { o.AuthOptions.AddFlags(cmd) // Bind command line flags to viper - viper.BindPFlag("config-dir", cmd.PersistentFlags().Lookup("config-dir")) - viper.BindPFlag("key-file", cmd.PersistentFlags().Lookup("key-file")) - viper.BindPFlag("server", cmd.PersistentFlags().Lookup("server")) - viper.BindPFlag("issuer", cmd.PersistentFlags().Lookup("issuer")) - viper.BindPFlag("audience", cmd.PersistentFlags().Lookup("audience")) - viper.BindPFlag("client-id", cmd.PersistentFlags().Lookup("client-id")) - viper.BindPFlag("organization", cmd.PersistentFlags().Lookup("organization")) - viper.BindPFlag("proxy-location", cmd.PersistentFlags().Lookup("proxy-location")) - viper.BindPFlag("log-location", cmd.PersistentFlags().Lookup("log-location")) - viper.BindPFlag("pulsar-instance", cmd.PersistentFlags().Lookup("pulsar-instance")) - viper.BindPFlag("pulsar-cluster", cmd.PersistentFlags().Lookup("pulsar-cluster")) - viper.BindPFlag("use-external-kafka", cmd.PersistentFlags().Lookup("use-external-kafka")) - viper.BindPFlag("use-external-pulsar", cmd.PersistentFlags().Lookup("use-external-pulsar")) - viper.BindPFlag("kafka-bootstrap-servers", cmd.PersistentFlags().Lookup("kafka-bootstrap-servers")) - viper.BindPFlag("kafka-schema-registry-url", cmd.PersistentFlags().Lookup("kafka-schema-registry-url")) - viper.BindPFlag("kafka-auth-type", cmd.PersistentFlags().Lookup("kafka-auth-type")) - viper.BindPFlag("kafka-auth-mechanism", cmd.PersistentFlags().Lookup("kafka-auth-mechanism")) - viper.BindPFlag("kafka-auth-user", cmd.PersistentFlags().Lookup("kafka-auth-user")) - viper.BindPFlag("kafka-auth-pass", cmd.PersistentFlags().Lookup("kafka-auth-pass")) - viper.BindPFlag("kafka-use-tls", cmd.PersistentFlags().Lookup("kafka-use-tls")) - viper.BindPFlag("kafka-client-key-file", cmd.PersistentFlags().Lookup("kafka-client-key-file")) - viper.BindPFlag("kafka-client-cert-file", cmd.PersistentFlags().Lookup("kafka-client-cert-file")) - viper.BindPFlag("kafka-ca-file", cmd.PersistentFlags().Lookup("kafka-ca-file")) - viper.BindPFlag("kafka-schema-registry-auth-user", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-user")) - viper.BindPFlag("kafka-schema-registry-auth-pass", 
cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-pass")) - viper.BindPFlag("kafka-schema-registry-bearer-token", cmd.PersistentFlags().Lookup("kafka-schema-registry-bearer-token")) - viper.BindPFlag("pulsar-web-service-url", cmd.PersistentFlags().Lookup("pulsar-web-service-url")) - viper.BindPFlag("pulsar-service-url", cmd.PersistentFlags().Lookup("pulsar-service-url")) - viper.BindPFlag("pulsar-auth-plugin", cmd.PersistentFlags().Lookup("pulsar-auth-plugin")) - viper.BindPFlag("pulsar-auth-params", cmd.PersistentFlags().Lookup("pulsar-auth-params")) - viper.BindPFlag("pulsar-tls-allow-insecure-connection", cmd.PersistentFlags().Lookup("pulsar-tls-allow-insecure-connection")) - viper.BindPFlag("pulsar-tls-enable-hostname-verification", cmd.PersistentFlags().Lookup("pulsar-tls-enable-hostname-verification")) - viper.BindPFlag("pulsar-tls-trust-certs-file-path", cmd.PersistentFlags().Lookup("pulsar-tls-trust-certs-file-path")) - viper.BindPFlag("pulsar-tls-cert-file", cmd.PersistentFlags().Lookup("pulsar-tls-cert-file")) - viper.BindPFlag("pulsar-tls-key-file", cmd.PersistentFlags().Lookup("pulsar-tls-key-file")) - viper.BindPFlag("pulsar-token", cmd.PersistentFlags().Lookup("pulsar-token")) + _ = viper.BindPFlag("config-dir", cmd.PersistentFlags().Lookup("config-dir")) + _ = viper.BindPFlag("key-file", cmd.PersistentFlags().Lookup("key-file")) + _ = viper.BindPFlag("server", cmd.PersistentFlags().Lookup("server")) + _ = viper.BindPFlag("issuer", cmd.PersistentFlags().Lookup("issuer")) + _ = viper.BindPFlag("audience", cmd.PersistentFlags().Lookup("audience")) + _ = viper.BindPFlag("client-id", cmd.PersistentFlags().Lookup("client-id")) + _ = viper.BindPFlag("organization", cmd.PersistentFlags().Lookup("organization")) + _ = viper.BindPFlag("proxy-location", cmd.PersistentFlags().Lookup("proxy-location")) + _ = viper.BindPFlag("log-location", cmd.PersistentFlags().Lookup("log-location")) + _ = viper.BindPFlag("pulsar-instance", cmd.PersistentFlags().Lookup("pulsar-instance")) + _ = viper.BindPFlag("pulsar-cluster", cmd.PersistentFlags().Lookup("pulsar-cluster")) + _ = viper.BindPFlag("use-external-kafka", cmd.PersistentFlags().Lookup("use-external-kafka")) + _ = viper.BindPFlag("use-external-pulsar", cmd.PersistentFlags().Lookup("use-external-pulsar")) + _ = viper.BindPFlag("kafka-bootstrap-servers", cmd.PersistentFlags().Lookup("kafka-bootstrap-servers")) + _ = viper.BindPFlag("kafka-schema-registry-url", cmd.PersistentFlags().Lookup("kafka-schema-registry-url")) + _ = viper.BindPFlag("kafka-auth-type", cmd.PersistentFlags().Lookup("kafka-auth-type")) + _ = viper.BindPFlag("kafka-auth-mechanism", cmd.PersistentFlags().Lookup("kafka-auth-mechanism")) + _ = viper.BindPFlag("kafka-auth-user", cmd.PersistentFlags().Lookup("kafka-auth-user")) + _ = viper.BindPFlag("kafka-auth-pass", cmd.PersistentFlags().Lookup("kafka-auth-pass")) + _ = viper.BindPFlag("kafka-use-tls", cmd.PersistentFlags().Lookup("kafka-use-tls")) + _ = viper.BindPFlag("kafka-client-key-file", cmd.PersistentFlags().Lookup("kafka-client-key-file")) + _ = viper.BindPFlag("kafka-client-cert-file", cmd.PersistentFlags().Lookup("kafka-client-cert-file")) + _ = viper.BindPFlag("kafka-ca-file", cmd.PersistentFlags().Lookup("kafka-ca-file")) + _ = viper.BindPFlag("kafka-schema-registry-auth-user", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-user")) + _ = viper.BindPFlag("kafka-schema-registry-auth-pass", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-pass")) + _ = 
viper.BindPFlag("kafka-schema-registry-bearer-token", cmd.PersistentFlags().Lookup("kafka-schema-registry-bearer-token")) + _ = viper.BindPFlag("pulsar-web-service-url", cmd.PersistentFlags().Lookup("pulsar-web-service-url")) + _ = viper.BindPFlag("pulsar-service-url", cmd.PersistentFlags().Lookup("pulsar-service-url")) + _ = viper.BindPFlag("pulsar-auth-plugin", cmd.PersistentFlags().Lookup("pulsar-auth-plugin")) + _ = viper.BindPFlag("pulsar-auth-params", cmd.PersistentFlags().Lookup("pulsar-auth-params")) + _ = viper.BindPFlag("pulsar-tls-allow-insecure-connection", cmd.PersistentFlags().Lookup("pulsar-tls-allow-insecure-connection")) + _ = viper.BindPFlag("pulsar-tls-enable-hostname-verification", cmd.PersistentFlags().Lookup("pulsar-tls-enable-hostname-verification")) + _ = viper.BindPFlag("pulsar-tls-trust-certs-file-path", cmd.PersistentFlags().Lookup("pulsar-tls-trust-certs-file-path")) + _ = viper.BindPFlag("pulsar-tls-cert-file", cmd.PersistentFlags().Lookup("pulsar-tls-cert-file")) + _ = viper.BindPFlag("pulsar-tls-key-file", cmd.PersistentFlags().Lookup("pulsar-tls-key-file")) + _ = viper.BindPFlag("pulsar-token", cmd.PersistentFlags().Lookup("pulsar-token")) } // Complete completes options from the provided values @@ -212,7 +212,7 @@ func (o *Options) Complete() error { o.ConfigDir = filepath.Join(home, ".snmcp") } if _, err := os.Stat(o.ConfigDir); os.IsNotExist(err) { - err := os.MkdirAll(o.ConfigDir, 0755) + err := os.MkdirAll(o.ConfigDir, 0750) if err != nil { return fmt.Errorf("failed to create config directory: %w", err) } diff --git a/pkg/kafka/connection.go b/pkg/kafka/connection.go index 6db0d48..e87ab8d 100644 --- a/pkg/kafka/connection.go +++ b/pkg/kafka/connection.go @@ -80,12 +80,14 @@ func NewSession(ctx KafkaContext) (*Session, error) { return session, nil } +// SASLConfig holds SASL authentication configuration. type SASLConfig struct { Mechanism string Username string Password string } +// TLSConfig holds TLS configuration for Kafka connections. 
type TLSConfig struct { Enabled bool ClientKeyFile string @@ -116,7 +118,7 @@ func tlsOpt(config *TLSConfig, opts []kgo.Opt) ([]kgo.Opt, error) { func saslOpt(config *SASLConfig, opts []kgo.Opt) ([]kgo.Opt, error) { if config.Mechanism != "" || config.Username != "" || config.Password != "" { if config.Mechanism == "" || config.Username == "" || config.Password == "" { - return nil, fmt.Errorf("All of Mechanism, Username, and Password must be specified if any are") + return nil, fmt.Errorf("all of Mechanism, Username, and Password must be specified if any are") } method := strings.ToLower(config.Mechanism) method = strings.ReplaceAll(method, "-", "") diff --git a/pkg/kafka/kafkaconnect.go b/pkg/kafka/kafkaconnect.go index 8819aa2..bd4c467 100644 --- a/pkg/kafka/kafkaconnect.go +++ b/pkg/kafka/kafkaconnect.go @@ -192,7 +192,7 @@ func (c *connectImpl) ListConnectors(_ context.Context) ([]string, error) { if err != nil { return nil, fmt.Errorf("failed to list connectors: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -247,7 +247,7 @@ func (c *connectImpl) CreateConnector(_ context.Context, name string, config map if err != nil { return nil, fmt.Errorf("failed to create connector: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -298,7 +298,7 @@ func (c *connectImpl) UpdateConnector(_ context.Context, name string, config map if err != nil { return nil, fmt.Errorf("failed to update connector: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -393,7 +393,7 @@ func (c *connectImpl) GetConnectorStatus(_ context.Context, name string) (*Conne if err != nil { return nil, fmt.Errorf("failed to get connector status: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -448,7 +448,7 @@ func (c *connectImpl) GetConnectorTasks(_ context.Context, name string) ([]TaskI if err != nil { return nil, fmt.Errorf("failed to get connector tasks: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -489,7 +489,7 @@ func (c *connectImpl) ListPlugins(_ context.Context) ([]PluginInfo, error) { if err != nil { return nil, fmt.Errorf("failed to list connector plugins: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) @@ -536,7 +536,7 @@ func (c *connectImpl) ValidateConfig(_ context.Context, pluginClass string, conf if err != nil { return nil, fmt.Errorf("failed to validate connector config: %w", err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() // Parse response body body, err := io.ReadAll(resp.Body) diff --git a/pkg/log/io.go b/pkg/log/io.go index 4c831fe..8ebcb3a 100644 --- a/pkg/log/io.go +++ b/pkg/log/io.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package log provides logging utilities for StreamNative MCP Server. 
package log import ( diff --git a/pkg/mcp/builders/pulsar/schema.go b/pkg/mcp/builders/pulsar/schema.go index 8bf4de3..13fc41a 100644 --- a/pkg/mcp/builders/pulsar/schema.go +++ b/pkg/mcp/builders/pulsar/schema.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" @@ -264,7 +265,7 @@ func (b *PulsarAdminSchemaToolBuilder) handleSchemaUpload(admin cmdutils.Client, // Read and parse the schema file var payload utils.PostSchemaPayload - file, err := os.ReadFile(filename) + file, err := os.ReadFile(filepath.Clean(filename)) if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Failed to read schema file '%s': %v", filename, err)), nil } diff --git a/pkg/mcp/builders/registry_test.go b/pkg/mcp/builders/registry_test.go index a4de66b..766c266 100644 --- a/pkg/mcp/builders/registry_test.go +++ b/pkg/mcp/builders/registry_test.go @@ -148,8 +148,7 @@ func TestToolRegistry(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("panic_tool", []string{"feature"}) - ///nolint:errcheck - registry.Register(builder) // First registration + _ = registry.Register(builder) // First registration assert.Panics(t, func() { registry.MustRegister(builder) // Duplicate registration should panic }) @@ -158,8 +157,7 @@ func TestToolRegistry(t *testing.T) { t.Run("GetBuilder_Success", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("get_test_tool", []string{"feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) retrieved, exists := registry.GetBuilder("get_test_tool") assert.True(t, exists) @@ -178,10 +176,8 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - ///nolint:errcheck - registry.Register(builder1) - ///nolint:errcheck - registry.Register(builder2) + _ = registry.Register(builder1) + _ = registry.Register(builder2) names := registry.ListBuilders() assert.Len(t, names, 2) @@ -194,8 +190,7 @@ func TestToolRegistry(t *testing.T) { t.Run("GetMetadata", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("metadata_tool", []string{"feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) metadata, exists := registry.GetMetadata("metadata_tool") assert.True(t, exists) @@ -208,10 +203,8 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - ///nolint:errcheck - registry.Register(builder1) - ///nolint:errcheck - registry.Register(builder2) + _ = registry.Register(builder1) + _ = registry.Register(builder2) metadata := registry.ListMetadata() assert.Len(t, metadata, 2) @@ -222,8 +215,7 @@ func TestToolRegistry(t *testing.T) { t.Run("BuildSingle_Success", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("single_tool", []string{"test_feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) config := ToolBuildConfig{ Features: []string{"test_feature"}, @@ -250,8 +242,7 @@ func TestToolRegistry(t *testing.T) { t.Run("BuildSingle_ValidationFailed", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("validation_tool", []string{"required_feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) config := 
ToolBuildConfig{ Features: []string{"wrong_feature"}, @@ -267,10 +258,8 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - ///nolint:errcheck - registry.Register(builder1) - ///nolint:errcheck - registry.Register(builder2) + _ = registry.Register(builder1) + _ = registry.Register(builder2) configs := map[string]ToolBuildConfig{ "tool1": {Features: []string{"feature1"}}, @@ -289,10 +278,8 @@ func TestToolRegistry(t *testing.T) { builder2.SetError(fmt.Errorf("build error")) - ///nolint:errcheck - registry.Register(builder1) - ///nolint:errcheck - registry.Register(builder2) + _ = registry.Register(builder1) + _ = registry.Register(builder2) configs := map[string]ToolBuildConfig{ "tool1": {Features: []string{"feature1"}}, @@ -310,12 +297,9 @@ func TestToolRegistry(t *testing.T) { builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) builder3 := NewMockToolBuilder("tool3", []string{"feature3"}) - ///nolint:errcheck - registry.Register(builder1) - ///nolint:errcheck - registry.Register(builder2) - ///nolint:errcheck - registry.Register(builder3) + _ = registry.Register(builder1) + _ = registry.Register(builder2) + _ = registry.Register(builder3) // Only provide feature1 and feature2 tools, err := registry.BuildAllWithFeatures(false, []string{"feature1", "feature2"}) @@ -326,8 +310,7 @@ func TestToolRegistry(t *testing.T) { t.Run("Clear", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("clear_tool", []string{"feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) assert.Equal(t, 1, registry.Count()) @@ -338,8 +321,7 @@ func TestToolRegistry(t *testing.T) { t.Run("Unregister", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("unregister_tool", []string{"feature"}) - ///nolint:errcheck - registry.Register(builder) + _ = registry.Register(builder) assert.Equal(t, 1, registry.Count()) diff --git a/pkg/mcp/pftools/manager.go b/pkg/mcp/pftools/manager.go index 0fef260..9e0dd91 100644 --- a/pkg/mcp/pftools/manager.go +++ b/pkg/mcp/pftools/manager.go @@ -61,7 +61,7 @@ type Server struct { func NewPulsarFunctionManager(snServer *Server, readOnly bool, options *ManagerOptions, sessionID string) (*PulsarFunctionManager, error) { // Get Pulsar client and admin client if snServer.PulsarSession == nil { - return nil, fmt.Errorf("Pulsar session not found in context") + return nil, fmt.Errorf("pulsar session not found in context") } // Get Pulsar client from session using type-safe interface diff --git a/pkg/mcp/prompts.go b/pkg/mcp/prompts.go index edec048..5f45be5 100644 --- a/pkg/mcp/prompts.go +++ b/pkg/mcp/prompts.go @@ -94,7 +94,7 @@ func HandleListPulsarClusters(ctx context.Context, _ mcp.GetPromptRequest) (*mcp if err != nil { return nil, fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer clustersBody.Body.Close() + defer func() { _ = clustersBody.Body.Close() }() var messages = make( []mcp.PromptMessage, @@ -170,7 +170,7 @@ func handleReadPulsarCluster(ctx context.Context, request mcp.GetPromptRequest) if err != nil { return nil, fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer clustersBody.Body.Close() + defer func() { _ = clustersBody.Body.Close() }() var cluster sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarCluster for _, c := range clusters.Items { if *c.Metadata.Name == name { @@ -248,7 +248,7 @@ func 
handleBuildServerlessPulsarCluster(ctx context.Context, request mcp.GetProm if err != nil { return nil, fmt.Errorf("failed to list pool options: %v", err) } - defer poolOptionsBody.Body.Close() + defer func() { _ = poolOptionsBody.Body.Close() }() if poolOptions == nil { return nil, fmt.Errorf("no pool options found") } diff --git a/pkg/mcp/sncontext_utils.go b/pkg/mcp/sncontext_utils.go index 562c0cd..dd7b8b7 100644 --- a/pkg/mcp/sncontext_utils.go +++ b/pkg/mcp/sncontext_utils.go @@ -50,7 +50,7 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus if err != nil { return fmt.Errorf("failed to list pulsar instances: %v", err) } - defer instancesBody.Body.Close() + defer func() { _ = instancesBody.Body.Close() }() var instance sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarInstance foundInstance := false @@ -61,18 +61,18 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus foundInstance = true break } - return fmt.Errorf("Pulsar instance %s is not valid", instanceName) + return fmt.Errorf("pulsar instance %s is not valid", instanceName) } } if !foundInstance { - return fmt.Errorf("Pulsar instance %s not found in organization %s", instanceName, options.Organization) + return fmt.Errorf("pulsar instance %s not found in organization %s", instanceName, options.Organization) } clusters, clustersBody, err := apiClient.CloudStreamnativeIoV1alpha1Api.ListCloudStreamnativeIoV1alpha1NamespacedPulsarCluster(ctx, options.Organization).Execute() if err != nil { return fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer clustersBody.Body.Close() + defer func() { _ = clustersBody.Body.Close() }() var cluster sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarCluster foundCluster := false for _, c := range clusters.Items { @@ -82,11 +82,11 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus foundCluster = true break } - return fmt.Errorf("Pulsar cluster %s is not available", clusterName) + return fmt.Errorf("pulsar cluster %s is not available", clusterName) } } if !foundCluster { - return fmt.Errorf("Pulsar cluster %s not found", clusterName) + return fmt.Errorf("pulsar cluster %s not found", clusterName) } clusterUID := string(*cluster.Metadata.Uid) diff --git a/pkg/mcp/streamnative_resources_log_tools.go b/pkg/mcp/streamnative_resources_log_tools.go index e3d2a0f..28c1f88 100644 --- a/pkg/mcp/streamnative_resources_log_tools.go +++ b/pkg/mcp/streamnative_resources_log_tools.go @@ -241,7 +241,7 @@ func (o *LogOptions) getLogs(client *http.Client, position int64, if err != nil { return results, fmt.Errorf("failed to request logs (%s): %v", url, err) } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() var logResult LogResult var body []byte diff --git a/pkg/mcp/streamnative_resources_tools.go b/pkg/mcp/streamnative_resources_tools.go index 8798def..b42af48 100644 --- a/pkg/mcp/streamnative_resources_tools.go +++ b/pkg/mcp/streamnative_resources_tools.go @@ -190,7 +190,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json if name != "" { // Try to get existing resource existingInstance, bdy, err := apiClient.CloudStreamnativeIoV1alpha1Api.ReadCloudStreamnativeIoV1alpha1NamespacedPulsarInstance(ctx, name, organization).Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() if err == nil { exists = true if existingInstance.Metadata != nil && existingInstance.Metadata.ResourceVersion != nil { @@ 
-223,7 +223,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() } else { verb = "created" // Create new resource @@ -233,7 +233,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() } if err != nil { @@ -277,7 +277,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC if name != "" { // Try to get existing resource existingCluster, bdy, err := apiClient.CloudStreamnativeIoV1alpha1Api.ReadCloudStreamnativeIoV1alpha1NamespacedPulsarCluster(ctx, name, organization).Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() if err == nil { exists = true if existingCluster.Metadata != nil && existingCluster.Metadata.ResourceVersion != nil { @@ -311,7 +311,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC } _, bdy, err = request.Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() } else { verb = "created" // Create new resource @@ -321,7 +321,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer bdy.Body.Close() + defer func() { _ = bdy.Body.Close() }() } if err != nil { diff --git a/pkg/pulsar/connection.go b/pkg/pulsar/connection.go index 578dff7..1b43b95 100644 --- a/pkg/pulsar/connection.go +++ b/pkg/pulsar/connection.go @@ -25,10 +25,11 @@ import ( ) const ( + // DefaultClientTimeout is the default timeout for Pulsar client operations. DefaultClientTimeout = 30 * time.Second ) -//nolint:revive +// PulsarContext holds configuration for connecting to a Pulsar cluster. type PulsarContext struct { ServiceURL string WebServiceURL string diff --git a/pkg/schema/avro_core.go b/pkg/schema/avro_core.go index be84a53..6d8da4d 100644 --- a/pkg/schema/avro_core.go +++ b/pkg/schema/avro_core.go @@ -63,7 +63,7 @@ func avroFieldToMcpOption(field *avro.Field) (mcp.ToolOption, error) { } isRequired := true - var underlyingTypeForDefault avro.Schema = fieldType // Used to check default value against non-union type + underlyingTypeForDefault := fieldType // Used to check default value against non-union type if unionSchema, ok := fieldType.(*avro.UnionSchema); ok { isNullAble := false diff --git a/pkg/schema/avro_test.go b/pkg/schema/avro_test.go index 58e181c..5a125a8 100644 --- a/pkg/schema/avro_test.go +++ b/pkg/schema/avro_test.go @@ -349,10 +349,11 @@ func TestAvroConverter_SerializeMCPRequestToPulsarPayload(t *testing.T) { var schemaToUse string var argsToMarshal map[string]interface{} - if tt.schemaInfo.Name == "SimpleAvroSerialize" { + switch tt.schemaInfo.Name { + case "SimpleAvroSerialize": schemaToUse = simpleRecordSchema argsToMarshal = tt.args - } else if tt.schemaInfo.Name == "ComplexAvroSerialize" { + case "ComplexAvroSerialize": schemaToUse = complexRecordSchemaString complexArgsCopy := make(map[string]interface{}) for k, v := range tt.args { From 72e180c5e134827fc800368c6c49e888476ec5cc Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 13:23:07 +0800 Subject: [PATCH 07/13] Revert "refactor(auth): migrate from dgrijalva/jwt-go to golang-jwt/jwt" This reverts commit ebef8620106b47e5a44ea0c89a64a0c279f08b9d. 
--- go.mod | 2 +- go.sum | 3 +- pkg/auth/auth.go | 10 +-- pkg/auth/authorization_tokenretriever.go | 7 +- pkg/auth/cache/cache.go | 1 - pkg/auth/client_credentials_flow.go | 2 - pkg/auth/client_credentials_provider.go | 10 +-- pkg/auth/oidc_endpoint_provider.go | 2 +- pkg/auth/store/keyring.go | 7 -- pkg/cmd/mcp/server.go | 2 +- pkg/cmd/mcp/stdio.go | 2 +- pkg/config/auth.go | 6 +- pkg/config/config.go | 10 +-- pkg/config/options.go | 74 ++++++++++----------- pkg/kafka/connection.go | 4 +- pkg/kafka/kafkaconnect.go | 14 ++-- pkg/log/io.go | 1 - pkg/mcp/builders/pulsar/schema.go | 3 +- pkg/mcp/builders/registry_test.go | 54 ++++++++++----- pkg/mcp/pftools/manager.go | 2 +- pkg/mcp/prompts.go | 6 +- pkg/mcp/sncontext_utils.go | 12 ++-- pkg/mcp/streamnative_resources_log_tools.go | 2 +- pkg/mcp/streamnative_resources_tools.go | 12 ++-- pkg/pulsar/connection.go | 3 +- pkg/schema/avro_core.go | 2 +- pkg/schema/avro_test.go | 5 +- 27 files changed, 118 insertions(+), 140 deletions(-) diff --git a/go.mod b/go.mod index 3d63a7b..606d990 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.24.4 require ( github.com/99designs/keyring v1.2.2 github.com/apache/pulsar-client-go v0.13.1 - github.com/golang-jwt/jwt v3.2.1+incompatible + github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/google/go-cmp v0.7.0 github.com/hamba/avro/v2 v2.28.0 github.com/mark3labs/mcp-go v0.43.2 diff --git a/go.sum b/go.sum index 1156150..8426de5 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -83,7 +85,6 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 737b907..0938d5a 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package auth provides authentication and authorization functionality for StreamNative MCP Server. 
-// It implements OAuth 2.0 flows including client credentials and device authorization grants. package auth import ( @@ -22,13 +20,12 @@ import ( "io" "time" - "github.com/golang-jwt/jwt" + "github.com/dgrijalva/jwt-go" "golang.org/x/oauth2" "k8s.io/utils/clock" ) const ( - // ClaimNameUserName is the JWT claim name for the username. ClaimNameUserName = "https://streamnative.io/username" ) @@ -142,18 +139,17 @@ func ExtractUserName(token oauth2.Token) (string, error) { return "", fmt.Errorf("access token doesn't contain a recognizable user claim") } -// DumpToken outputs token information to the provided writer for debugging. func DumpToken(out io.Writer, token oauth2.Token) { p := jwt.Parser{} claims := jwt.MapClaims{} if _, _, err := p.ParseUnverified(token.AccessToken, claims); err != nil { - _, _ = fmt.Fprintf(out, "Unable to parse token. Err: %v\n", err) + fmt.Fprintf(out, "Unable to parse token. Err: %v\n", err) return } text, err := json.MarshalIndent(claims, "", " ") if err != nil { - _, _ = fmt.Fprintf(out, "Unable to print token. Err: %v\n", err) + fmt.Fprintf(out, "Unable to print token. Err: %v\n", err) } _, _ = out.Write(text) _, _ = fmt.Fprintln(out, "") diff --git a/pkg/auth/authorization_tokenretriever.go b/pkg/auth/authorization_tokenretriever.go index 730e9ff..99c2353 100644 --- a/pkg/auth/authorization_tokenretriever.go +++ b/pkg/auth/authorization_tokenretriever.go @@ -82,7 +82,6 @@ type TokenErrorResponse struct { ErrorDescription string `json:"error_description"` } -// TokenError represents an error response from the token endpoint. type TokenError struct { ErrorCode string ErrorDescription string @@ -223,7 +222,6 @@ func (ce *TokenRetriever) ExchangeCode(req AuthorizationCodeExchangeRequest) (*T if err != nil { return nil, err } - defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } @@ -232,7 +230,7 @@ func (ce *TokenRetriever) ExchangeCode(req AuthorizationCodeExchangeRequest) (*T // auth tokens for errors and parsing the raw body to a TokenResult struct func (ce *TokenRetriever) handleAuthTokensResponse(resp *http.Response) (*TokenResult, error) { if resp.Body != nil { - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() } if resp.StatusCode < 200 || resp.StatusCode > 299 { @@ -274,7 +272,6 @@ func (ce *TokenRetriever) ExchangeDeviceCode(ctx context.Context, req DeviceCode if err != nil { return nil, err } - defer func() { _ = response.Body.Close() }() token, err := ce.handleAuthTokensResponse(response) if err == nil { return token, nil @@ -317,7 +314,6 @@ func (ce *TokenRetriever) ExchangeRefreshToken(req RefreshTokenExchangeRequest) if err != nil { return nil, err } - defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } @@ -334,7 +330,6 @@ func (ce *TokenRetriever) ExchangeClientCredentials(req ClientCredentialsExchang if err != nil { return nil, err } - defer func() { _ = response.Body.Close() }() return ce.handleAuthTokensResponse(response) } diff --git a/pkg/auth/cache/cache.go b/pkg/auth/cache/cache.go index 5a2b8ce..70bc133 100644 --- a/pkg/auth/cache/cache.go +++ b/pkg/auth/cache/cache.go @@ -51,7 +51,6 @@ type tokenCache struct { token *oauth2.Token } -// NewDefaultTokenCache creates a default token cache with the given store and refresher. 
func NewDefaultTokenCache(store store.Store, audience string, refresher auth.AuthorizationGrantRefresher) (CachingTokenSource, error) { cache := &tokenCache{ diff --git a/pkg/auth/client_credentials_flow.go b/pkg/auth/client_credentials_flow.go index 2fa7298..7de8bd4 100644 --- a/pkg/auth/client_credentials_flow.go +++ b/pkg/auth/client_credentials_flow.go @@ -41,7 +41,6 @@ type ClientCredentialsExchanger interface { ExchangeClientCredentials(req ClientCredentialsExchangeRequest) (*TokenResult, error) } -// NewClientCredentialsFlow creates a new client credentials flow with the given components. func NewClientCredentialsFlow( issuerData Issuer, provider ClientCredentialsProvider, @@ -128,7 +127,6 @@ type ClientCredentialsGrantRefresher struct { clock clock.Clock } -// NewDefaultClientCredentialsGrantRefresher creates a default client credentials grant refresher. func NewDefaultClientCredentialsGrantRefresher(issuerData Issuer, clock clock.Clock) (*ClientCredentialsGrantRefresher, error) { wellKnownEndpoints, err := GetOIDCWellKnownEndpointsFromIssuerURL(issuerData.IssuerEndpoint) diff --git a/pkg/auth/client_credentials_provider.go b/pkg/auth/client_credentials_provider.go index 47e84f2..2ab652d 100644 --- a/pkg/auth/client_credentials_provider.go +++ b/pkg/auth/client_credentials_provider.go @@ -18,23 +18,19 @@ import ( "encoding/json" "fmt" "os" - "path/filepath" "strings" ) const ( - // KeyFileTypeServiceAccount identifies service account key files. KeyFileTypeServiceAccount = "sn_service_account" FILE = "file://" DATA = "data://" ) -// KeyFileProvider provides client credentials from a key file path. type KeyFileProvider struct { KeyFile string } -// KeyFile holds service account credentials from a JSON key file. type KeyFile struct { Type string `json:"type"` ClientID string `json:"client_id"` @@ -42,7 +38,6 @@ type KeyFile struct { ClientEmail string `json:"client_email"` } -// NewClientCredentialsProviderFromKeyFile creates a provider from a key file path. func NewClientCredentialsProviderFromKeyFile(keyFile string) *KeyFileProvider { return &KeyFileProvider{ KeyFile: keyFile, @@ -57,7 +52,7 @@ func (k *KeyFileProvider) GetClientCredentials() (*KeyFile, error) { switch { case strings.HasPrefix(k.KeyFile, FILE): filename := strings.TrimPrefix(k.KeyFile, FILE) - keyFile, err = os.ReadFile(filepath.Clean(filename)) + keyFile, err = os.ReadFile(filename) case strings.HasPrefix(k.KeyFile, DATA): keyFile = []byte(strings.TrimPrefix(k.KeyFile, DATA)) case strings.HasPrefix(k.KeyFile, "data:"): @@ -85,12 +80,10 @@ func (k *KeyFileProvider) GetClientCredentials() (*KeyFile, error) { return &v, nil } -// KeyFileStructProvider provides client credentials from an in-memory KeyFile struct. type KeyFileStructProvider struct { KeyFile *KeyFile } -// GetClientCredentials returns the client credentials from the in-memory KeyFile. func (k *KeyFileStructProvider) GetClientCredentials() (*KeyFile, error) { if k.KeyFile == nil { return nil, fmt.Errorf("key file is nil") @@ -98,7 +91,6 @@ func (k *KeyFileStructProvider) GetClientCredentials() (*KeyFile, error) { return k.KeyFile, nil } -// NewClientCredentialsProviderFromKeyFileStruct creates a provider from an in-memory KeyFile. 
func NewClientCredentialsProviderFromKeyFileStruct(keyFile *KeyFile) *KeyFileStructProvider { return &KeyFileStructProvider{ KeyFile: keyFile, diff --git a/pkg/auth/oidc_endpoint_provider.go b/pkg/auth/oidc_endpoint_provider.go index 62ecf1d..69c9ef1 100644 --- a/pkg/auth/oidc_endpoint_provider.go +++ b/pkg/auth/oidc_endpoint_provider.go @@ -43,7 +43,7 @@ func GetOIDCWellKnownEndpointsFromIssuerURL(issuerURL string) (*OIDCWellKnownEnd if err != nil { return nil, errors.Wrapf(err, "could not get well known endpoints from url %s", u.String()) } - defer func() { _ = r.Body.Close() }() + defer r.Body.Close() var wkEndpoints OIDCWellKnownEndpoints err = json.NewDecoder(r.Body).Decode(&wkEndpoints) diff --git a/pkg/auth/store/keyring.go b/pkg/auth/store/keyring.go index f2c47f2..ae09e2d 100644 --- a/pkg/auth/store/keyring.go +++ b/pkg/auth/store/keyring.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package store provides token storage implementations for authentication credentials. -// It includes a KeyringStore implementation that uses the system keyring for secure storage. package store import ( @@ -27,7 +25,6 @@ import ( "k8s.io/utils/clock" ) -// KeyringStore provides secure token storage using the system keyring. type KeyringStore struct { kr keyring.Keyring clock clock.Clock @@ -51,7 +48,6 @@ func NewKeyringStore(kr keyring.Keyring) (*KeyringStore, error) { var _ Store = &KeyringStore{} -// SaveGrant saves an authorization grant to the keyring. func (f *KeyringStore) SaveGrant(audience string, grant auth.AuthorizationGrant) error { f.lock.Lock() defer f.lock.Unlock() @@ -79,7 +75,6 @@ func (f *KeyringStore) SaveGrant(audience string, grant auth.AuthorizationGrant) return nil } -// LoadGrant loads an authorization grant from the keyring. func (f *KeyringStore) LoadGrant(audience string) (*auth.AuthorizationGrant, error) { f.lock.Lock() defer f.lock.Unlock() @@ -102,7 +97,6 @@ func (f *KeyringStore) LoadGrant(audience string) (*auth.AuthorizationGrant, err return &item.Grant, nil } -// WhoAmI returns the username associated with the grant for the given audience. func (f *KeyringStore) WhoAmI(audience string) (string, error) { f.lock.Lock() defer f.lock.Unlock() @@ -138,7 +132,6 @@ func (f *KeyringStore) WhoAmI(audience string) (string, error) { return label, err } -// Logout removes all stored grants from the keyring. 
func (f *KeyringStore) Logout() error { f.lock.Lock() defer f.lock.Unlock() diff --git a/pkg/cmd/mcp/server.go b/pkg/cmd/mcp/server.go index 3dd5aa1..5de74c8 100644 --- a/pkg/cmd/mcp/server.go +++ b/pkg/cmd/mcp/server.go @@ -29,7 +29,7 @@ import ( ) func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *logrus.Logger) (*mcp.Server, error) { - snConfig := configOpts.LoadConfigOrDie() + snConfig := configOpts.Options.LoadConfigOrDie() var s *server.MCPServer var mcpServer *mcp.Server switch { diff --git a/pkg/cmd/mcp/stdio.go b/pkg/cmd/mcp/stdio.go index 8bf7dca..b06ecb6 100644 --- a/pkg/cmd/mcp/stdio.go +++ b/pkg/cmd/mcp/stdio.go @@ -110,7 +110,7 @@ func initLogger(filePath string) (*logrus.Logger, error) { return logrus.New(), nil } - fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) + fd, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return nil, fmt.Errorf("failed to open log file: %w", err) } diff --git a/pkg/config/auth.go b/pkg/config/auth.go index e7780bf..e605a50 100644 --- a/pkg/config/auth.go +++ b/pkg/config/auth.go @@ -24,13 +24,10 @@ import ( ) const ( - // ServiceName is the name used for keyring service. - ServiceName = "StreamNativeMCP" - // KeychainName is the name of the macOS keychain. + ServiceName = "StreamNativeMCP" KeychainName = "snmcp" ) -// AuthOptions provides configuration options for authentication. type AuthOptions struct { BackendOverride string storage Storage @@ -40,7 +37,6 @@ type AuthOptions struct { store.Store } -// NewDefaultAuthOptions creates a new AuthOptions with default values. func NewDefaultAuthOptions() AuthOptions { return AuthOptions{} } diff --git a/pkg/config/config.go b/pkg/config/config.go index 89e0561..2084e0f 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -21,7 +21,6 @@ import ( "github.com/streamnative/streamnative-mcp-server/pkg/auth" ) -// SnConfig holds the StreamNative MCP Server configuration. type SnConfig struct { // the API server endpoint Server string `yaml:"server"` @@ -41,7 +40,6 @@ type SnConfig struct { ExternalPulsar *ExternalPulsar `yaml:"external-pulsar"` } -// Auth holds authentication configuration for the StreamNative API. type Auth struct { // the OAuth 2.0 issuer endpoint IssuerEndpoint string `yaml:"issuer"` @@ -52,10 +50,10 @@ type Auth struct { } func (a *Auth) Validate() error { - if isValidIssuer(a.IssuerEndpoint) && isValidClientID(a.ClientID) && isValidAudience(a.Audience) { - return nil + if !(isValidIssuer(a.IssuerEndpoint) && isValidClientID(a.ClientID) && isValidAudience(a.Audience)) { + return errors.New("configuration error: auth section is incomplete or invalid") } - return errors.New("configuration error: auth section is incomplete or invalid") + return nil } func isValidIssuer(iss string) bool { @@ -79,14 +77,12 @@ func (a *Auth) Issuer() auth.Issuer { } } -// Context holds the default context for cluster connections. type Context struct { Organization string `yaml:"organization,omitempty"` PulsarInstance string `yaml:"pulsar-instance,omitempty"` PulsarCluster string `yaml:"pulsar-cluster,omitempty"` } -// Storage defines the interface for persisting configuration and credentials. 
type Storage interface { // Gets the config directory for configuration files, credentials and caches GetConfigDirectory() string diff --git a/pkg/config/options.go b/pkg/config/options.go index 5078dc3..bd5cbcc 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -162,42 +162,42 @@ func (o *Options) AddFlags(cmd *cobra.Command) { o.AuthOptions.AddFlags(cmd) // Bind command line flags to viper - _ = viper.BindPFlag("config-dir", cmd.PersistentFlags().Lookup("config-dir")) - _ = viper.BindPFlag("key-file", cmd.PersistentFlags().Lookup("key-file")) - _ = viper.BindPFlag("server", cmd.PersistentFlags().Lookup("server")) - _ = viper.BindPFlag("issuer", cmd.PersistentFlags().Lookup("issuer")) - _ = viper.BindPFlag("audience", cmd.PersistentFlags().Lookup("audience")) - _ = viper.BindPFlag("client-id", cmd.PersistentFlags().Lookup("client-id")) - _ = viper.BindPFlag("organization", cmd.PersistentFlags().Lookup("organization")) - _ = viper.BindPFlag("proxy-location", cmd.PersistentFlags().Lookup("proxy-location")) - _ = viper.BindPFlag("log-location", cmd.PersistentFlags().Lookup("log-location")) - _ = viper.BindPFlag("pulsar-instance", cmd.PersistentFlags().Lookup("pulsar-instance")) - _ = viper.BindPFlag("pulsar-cluster", cmd.PersistentFlags().Lookup("pulsar-cluster")) - _ = viper.BindPFlag("use-external-kafka", cmd.PersistentFlags().Lookup("use-external-kafka")) - _ = viper.BindPFlag("use-external-pulsar", cmd.PersistentFlags().Lookup("use-external-pulsar")) - _ = viper.BindPFlag("kafka-bootstrap-servers", cmd.PersistentFlags().Lookup("kafka-bootstrap-servers")) - _ = viper.BindPFlag("kafka-schema-registry-url", cmd.PersistentFlags().Lookup("kafka-schema-registry-url")) - _ = viper.BindPFlag("kafka-auth-type", cmd.PersistentFlags().Lookup("kafka-auth-type")) - _ = viper.BindPFlag("kafka-auth-mechanism", cmd.PersistentFlags().Lookup("kafka-auth-mechanism")) - _ = viper.BindPFlag("kafka-auth-user", cmd.PersistentFlags().Lookup("kafka-auth-user")) - _ = viper.BindPFlag("kafka-auth-pass", cmd.PersistentFlags().Lookup("kafka-auth-pass")) - _ = viper.BindPFlag("kafka-use-tls", cmd.PersistentFlags().Lookup("kafka-use-tls")) - _ = viper.BindPFlag("kafka-client-key-file", cmd.PersistentFlags().Lookup("kafka-client-key-file")) - _ = viper.BindPFlag("kafka-client-cert-file", cmd.PersistentFlags().Lookup("kafka-client-cert-file")) - _ = viper.BindPFlag("kafka-ca-file", cmd.PersistentFlags().Lookup("kafka-ca-file")) - _ = viper.BindPFlag("kafka-schema-registry-auth-user", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-user")) - _ = viper.BindPFlag("kafka-schema-registry-auth-pass", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-pass")) - _ = viper.BindPFlag("kafka-schema-registry-bearer-token", cmd.PersistentFlags().Lookup("kafka-schema-registry-bearer-token")) - _ = viper.BindPFlag("pulsar-web-service-url", cmd.PersistentFlags().Lookup("pulsar-web-service-url")) - _ = viper.BindPFlag("pulsar-service-url", cmd.PersistentFlags().Lookup("pulsar-service-url")) - _ = viper.BindPFlag("pulsar-auth-plugin", cmd.PersistentFlags().Lookup("pulsar-auth-plugin")) - _ = viper.BindPFlag("pulsar-auth-params", cmd.PersistentFlags().Lookup("pulsar-auth-params")) - _ = viper.BindPFlag("pulsar-tls-allow-insecure-connection", cmd.PersistentFlags().Lookup("pulsar-tls-allow-insecure-connection")) - _ = viper.BindPFlag("pulsar-tls-enable-hostname-verification", cmd.PersistentFlags().Lookup("pulsar-tls-enable-hostname-verification")) - _ = viper.BindPFlag("pulsar-tls-trust-certs-file-path", 
cmd.PersistentFlags().Lookup("pulsar-tls-trust-certs-file-path")) - _ = viper.BindPFlag("pulsar-tls-cert-file", cmd.PersistentFlags().Lookup("pulsar-tls-cert-file")) - _ = viper.BindPFlag("pulsar-tls-key-file", cmd.PersistentFlags().Lookup("pulsar-tls-key-file")) - _ = viper.BindPFlag("pulsar-token", cmd.PersistentFlags().Lookup("pulsar-token")) + viper.BindPFlag("config-dir", cmd.PersistentFlags().Lookup("config-dir")) + viper.BindPFlag("key-file", cmd.PersistentFlags().Lookup("key-file")) + viper.BindPFlag("server", cmd.PersistentFlags().Lookup("server")) + viper.BindPFlag("issuer", cmd.PersistentFlags().Lookup("issuer")) + viper.BindPFlag("audience", cmd.PersistentFlags().Lookup("audience")) + viper.BindPFlag("client-id", cmd.PersistentFlags().Lookup("client-id")) + viper.BindPFlag("organization", cmd.PersistentFlags().Lookup("organization")) + viper.BindPFlag("proxy-location", cmd.PersistentFlags().Lookup("proxy-location")) + viper.BindPFlag("log-location", cmd.PersistentFlags().Lookup("log-location")) + viper.BindPFlag("pulsar-instance", cmd.PersistentFlags().Lookup("pulsar-instance")) + viper.BindPFlag("pulsar-cluster", cmd.PersistentFlags().Lookup("pulsar-cluster")) + viper.BindPFlag("use-external-kafka", cmd.PersistentFlags().Lookup("use-external-kafka")) + viper.BindPFlag("use-external-pulsar", cmd.PersistentFlags().Lookup("use-external-pulsar")) + viper.BindPFlag("kafka-bootstrap-servers", cmd.PersistentFlags().Lookup("kafka-bootstrap-servers")) + viper.BindPFlag("kafka-schema-registry-url", cmd.PersistentFlags().Lookup("kafka-schema-registry-url")) + viper.BindPFlag("kafka-auth-type", cmd.PersistentFlags().Lookup("kafka-auth-type")) + viper.BindPFlag("kafka-auth-mechanism", cmd.PersistentFlags().Lookup("kafka-auth-mechanism")) + viper.BindPFlag("kafka-auth-user", cmd.PersistentFlags().Lookup("kafka-auth-user")) + viper.BindPFlag("kafka-auth-pass", cmd.PersistentFlags().Lookup("kafka-auth-pass")) + viper.BindPFlag("kafka-use-tls", cmd.PersistentFlags().Lookup("kafka-use-tls")) + viper.BindPFlag("kafka-client-key-file", cmd.PersistentFlags().Lookup("kafka-client-key-file")) + viper.BindPFlag("kafka-client-cert-file", cmd.PersistentFlags().Lookup("kafka-client-cert-file")) + viper.BindPFlag("kafka-ca-file", cmd.PersistentFlags().Lookup("kafka-ca-file")) + viper.BindPFlag("kafka-schema-registry-auth-user", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-user")) + viper.BindPFlag("kafka-schema-registry-auth-pass", cmd.PersistentFlags().Lookup("kafka-schema-registry-auth-pass")) + viper.BindPFlag("kafka-schema-registry-bearer-token", cmd.PersistentFlags().Lookup("kafka-schema-registry-bearer-token")) + viper.BindPFlag("pulsar-web-service-url", cmd.PersistentFlags().Lookup("pulsar-web-service-url")) + viper.BindPFlag("pulsar-service-url", cmd.PersistentFlags().Lookup("pulsar-service-url")) + viper.BindPFlag("pulsar-auth-plugin", cmd.PersistentFlags().Lookup("pulsar-auth-plugin")) + viper.BindPFlag("pulsar-auth-params", cmd.PersistentFlags().Lookup("pulsar-auth-params")) + viper.BindPFlag("pulsar-tls-allow-insecure-connection", cmd.PersistentFlags().Lookup("pulsar-tls-allow-insecure-connection")) + viper.BindPFlag("pulsar-tls-enable-hostname-verification", cmd.PersistentFlags().Lookup("pulsar-tls-enable-hostname-verification")) + viper.BindPFlag("pulsar-tls-trust-certs-file-path", cmd.PersistentFlags().Lookup("pulsar-tls-trust-certs-file-path")) + viper.BindPFlag("pulsar-tls-cert-file", cmd.PersistentFlags().Lookup("pulsar-tls-cert-file")) + 
viper.BindPFlag("pulsar-tls-key-file", cmd.PersistentFlags().Lookup("pulsar-tls-key-file")) + viper.BindPFlag("pulsar-token", cmd.PersistentFlags().Lookup("pulsar-token")) } // Complete completes options from the provided values @@ -212,7 +212,7 @@ func (o *Options) Complete() error { o.ConfigDir = filepath.Join(home, ".snmcp") } if _, err := os.Stat(o.ConfigDir); os.IsNotExist(err) { - err := os.MkdirAll(o.ConfigDir, 0750) + err := os.MkdirAll(o.ConfigDir, 0755) if err != nil { return fmt.Errorf("failed to create config directory: %w", err) } diff --git a/pkg/kafka/connection.go b/pkg/kafka/connection.go index e87ab8d..6db0d48 100644 --- a/pkg/kafka/connection.go +++ b/pkg/kafka/connection.go @@ -80,14 +80,12 @@ func NewSession(ctx KafkaContext) (*Session, error) { return session, nil } -// SASLConfig holds SASL authentication configuration. type SASLConfig struct { Mechanism string Username string Password string } -// TLSConfig holds TLS configuration for Kafka connections. type TLSConfig struct { Enabled bool ClientKeyFile string @@ -118,7 +116,7 @@ func tlsOpt(config *TLSConfig, opts []kgo.Opt) ([]kgo.Opt, error) { func saslOpt(config *SASLConfig, opts []kgo.Opt) ([]kgo.Opt, error) { if config.Mechanism != "" || config.Username != "" || config.Password != "" { if config.Mechanism == "" || config.Username == "" || config.Password == "" { - return nil, fmt.Errorf("all of Mechanism, Username, and Password must be specified if any are") + return nil, fmt.Errorf("All of Mechanism, Username, and Password must be specified if any are") } method := strings.ToLower(config.Mechanism) method = strings.ReplaceAll(method, "-", "") diff --git a/pkg/kafka/kafkaconnect.go b/pkg/kafka/kafkaconnect.go index bd4c467..8819aa2 100644 --- a/pkg/kafka/kafkaconnect.go +++ b/pkg/kafka/kafkaconnect.go @@ -192,7 +192,7 @@ func (c *connectImpl) ListConnectors(_ context.Context) ([]string, error) { if err != nil { return nil, fmt.Errorf("failed to list connectors: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -247,7 +247,7 @@ func (c *connectImpl) CreateConnector(_ context.Context, name string, config map if err != nil { return nil, fmt.Errorf("failed to create connector: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -298,7 +298,7 @@ func (c *connectImpl) UpdateConnector(_ context.Context, name string, config map if err != nil { return nil, fmt.Errorf("failed to update connector: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -393,7 +393,7 @@ func (c *connectImpl) GetConnectorStatus(_ context.Context, name string) (*Conne if err != nil { return nil, fmt.Errorf("failed to get connector status: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -448,7 +448,7 @@ func (c *connectImpl) GetConnectorTasks(_ context.Context, name string) ([]TaskI if err != nil { return nil, fmt.Errorf("failed to get connector tasks: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -489,7 +489,7 @@ func (c *connectImpl) ListPlugins(_ context.Context) ([]PluginInfo, error) { if err != nil { return nil, fmt.Errorf("failed to list connector plugins: %w", err) } - defer func() { _ 
= resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) @@ -536,7 +536,7 @@ func (c *connectImpl) ValidateConfig(_ context.Context, pluginClass string, conf if err != nil { return nil, fmt.Errorf("failed to validate connector config: %w", err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() // Parse response body body, err := io.ReadAll(resp.Body) diff --git a/pkg/log/io.go b/pkg/log/io.go index 8ebcb3a..4c831fe 100644 --- a/pkg/log/io.go +++ b/pkg/log/io.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package log provides logging utilities for StreamNative MCP Server. package log import ( diff --git a/pkg/mcp/builders/pulsar/schema.go b/pkg/mcp/builders/pulsar/schema.go index 13fc41a..8bf4de3 100644 --- a/pkg/mcp/builders/pulsar/schema.go +++ b/pkg/mcp/builders/pulsar/schema.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "os" - "path/filepath" "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" @@ -265,7 +264,7 @@ func (b *PulsarAdminSchemaToolBuilder) handleSchemaUpload(admin cmdutils.Client, // Read and parse the schema file var payload utils.PostSchemaPayload - file, err := os.ReadFile(filepath.Clean(filename)) + file, err := os.ReadFile(filename) if err != nil { return mcp.NewToolResultError(fmt.Sprintf("Failed to read schema file '%s': %v", filename, err)), nil } diff --git a/pkg/mcp/builders/registry_test.go b/pkg/mcp/builders/registry_test.go index 766c266..a4de66b 100644 --- a/pkg/mcp/builders/registry_test.go +++ b/pkg/mcp/builders/registry_test.go @@ -148,7 +148,8 @@ func TestToolRegistry(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("panic_tool", []string{"feature"}) - _ = registry.Register(builder) // First registration + ///nolint:errcheck + registry.Register(builder) // First registration assert.Panics(t, func() { registry.MustRegister(builder) // Duplicate registration should panic }) @@ -157,7 +158,8 @@ func TestToolRegistry(t *testing.T) { t.Run("GetBuilder_Success", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("get_test_tool", []string{"feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) retrieved, exists := registry.GetBuilder("get_test_tool") assert.True(t, exists) @@ -176,8 +178,10 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - _ = registry.Register(builder1) - _ = registry.Register(builder2) + ///nolint:errcheck + registry.Register(builder1) + ///nolint:errcheck + registry.Register(builder2) names := registry.ListBuilders() assert.Len(t, names, 2) @@ -190,7 +194,8 @@ func TestToolRegistry(t *testing.T) { t.Run("GetMetadata", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("metadata_tool", []string{"feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) metadata, exists := registry.GetMetadata("metadata_tool") assert.True(t, exists) @@ -203,8 +208,10 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - _ = registry.Register(builder1) - _ = registry.Register(builder2) + ///nolint:errcheck + registry.Register(builder1) + ///nolint:errcheck + registry.Register(builder2) metadata := 
registry.ListMetadata() assert.Len(t, metadata, 2) @@ -215,7 +222,8 @@ func TestToolRegistry(t *testing.T) { t.Run("BuildSingle_Success", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("single_tool", []string{"test_feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) config := ToolBuildConfig{ Features: []string{"test_feature"}, @@ -242,7 +250,8 @@ func TestToolRegistry(t *testing.T) { t.Run("BuildSingle_ValidationFailed", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("validation_tool", []string{"required_feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) config := ToolBuildConfig{ Features: []string{"wrong_feature"}, @@ -258,8 +267,10 @@ func TestToolRegistry(t *testing.T) { builder1 := NewMockToolBuilder("tool1", []string{"feature1"}) builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) - _ = registry.Register(builder1) - _ = registry.Register(builder2) + ///nolint:errcheck + registry.Register(builder1) + ///nolint:errcheck + registry.Register(builder2) configs := map[string]ToolBuildConfig{ "tool1": {Features: []string{"feature1"}}, @@ -278,8 +289,10 @@ func TestToolRegistry(t *testing.T) { builder2.SetError(fmt.Errorf("build error")) - _ = registry.Register(builder1) - _ = registry.Register(builder2) + ///nolint:errcheck + registry.Register(builder1) + ///nolint:errcheck + registry.Register(builder2) configs := map[string]ToolBuildConfig{ "tool1": {Features: []string{"feature1"}}, @@ -297,9 +310,12 @@ func TestToolRegistry(t *testing.T) { builder2 := NewMockToolBuilder("tool2", []string{"feature2"}) builder3 := NewMockToolBuilder("tool3", []string{"feature3"}) - _ = registry.Register(builder1) - _ = registry.Register(builder2) - _ = registry.Register(builder3) + ///nolint:errcheck + registry.Register(builder1) + ///nolint:errcheck + registry.Register(builder2) + ///nolint:errcheck + registry.Register(builder3) // Only provide feature1 and feature2 tools, err := registry.BuildAllWithFeatures(false, []string{"feature1", "feature2"}) @@ -310,7 +326,8 @@ func TestToolRegistry(t *testing.T) { t.Run("Clear", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("clear_tool", []string{"feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) assert.Equal(t, 1, registry.Count()) @@ -321,7 +338,8 @@ func TestToolRegistry(t *testing.T) { t.Run("Unregister", func(t *testing.T) { registry := NewToolRegistry() builder := NewMockToolBuilder("unregister_tool", []string{"feature"}) - _ = registry.Register(builder) + ///nolint:errcheck + registry.Register(builder) assert.Equal(t, 1, registry.Count()) diff --git a/pkg/mcp/pftools/manager.go b/pkg/mcp/pftools/manager.go index 9e0dd91..0fef260 100644 --- a/pkg/mcp/pftools/manager.go +++ b/pkg/mcp/pftools/manager.go @@ -61,7 +61,7 @@ type Server struct { func NewPulsarFunctionManager(snServer *Server, readOnly bool, options *ManagerOptions, sessionID string) (*PulsarFunctionManager, error) { // Get Pulsar client and admin client if snServer.PulsarSession == nil { - return nil, fmt.Errorf("pulsar session not found in context") + return nil, fmt.Errorf("Pulsar session not found in context") } // Get Pulsar client from session using type-safe interface diff --git a/pkg/mcp/prompts.go b/pkg/mcp/prompts.go index 5f45be5..edec048 100644 --- a/pkg/mcp/prompts.go +++ b/pkg/mcp/prompts.go @@ -94,7 +94,7 @@ func HandleListPulsarClusters(ctx 
context.Context, _ mcp.GetPromptRequest) (*mcp if err != nil { return nil, fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer func() { _ = clustersBody.Body.Close() }() + defer clustersBody.Body.Close() var messages = make( []mcp.PromptMessage, @@ -170,7 +170,7 @@ func handleReadPulsarCluster(ctx context.Context, request mcp.GetPromptRequest) if err != nil { return nil, fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer func() { _ = clustersBody.Body.Close() }() + defer clustersBody.Body.Close() var cluster sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarCluster for _, c := range clusters.Items { if *c.Metadata.Name == name { @@ -248,7 +248,7 @@ func handleBuildServerlessPulsarCluster(ctx context.Context, request mcp.GetProm if err != nil { return nil, fmt.Errorf("failed to list pool options: %v", err) } - defer func() { _ = poolOptionsBody.Body.Close() }() + defer poolOptionsBody.Body.Close() if poolOptions == nil { return nil, fmt.Errorf("no pool options found") } diff --git a/pkg/mcp/sncontext_utils.go b/pkg/mcp/sncontext_utils.go index dd7b8b7..562c0cd 100644 --- a/pkg/mcp/sncontext_utils.go +++ b/pkg/mcp/sncontext_utils.go @@ -50,7 +50,7 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus if err != nil { return fmt.Errorf("failed to list pulsar instances: %v", err) } - defer func() { _ = instancesBody.Body.Close() }() + defer instancesBody.Body.Close() var instance sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarInstance foundInstance := false @@ -61,18 +61,18 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus foundInstance = true break } - return fmt.Errorf("pulsar instance %s is not valid", instanceName) + return fmt.Errorf("Pulsar instance %s is not valid", instanceName) } } if !foundInstance { - return fmt.Errorf("pulsar instance %s not found in organization %s", instanceName, options.Organization) + return fmt.Errorf("Pulsar instance %s not found in organization %s", instanceName, options.Organization) } clusters, clustersBody, err := apiClient.CloudStreamnativeIoV1alpha1Api.ListCloudStreamnativeIoV1alpha1NamespacedPulsarCluster(ctx, options.Organization).Execute() if err != nil { return fmt.Errorf("failed to list pulsar clusters: %v", err) } - defer func() { _ = clustersBody.Body.Close() }() + defer clustersBody.Body.Close() var cluster sncloud.ComGithubStreamnativeCloudApiServerPkgApisCloudV1alpha1PulsarCluster foundCluster := false for _, c := range clusters.Items { @@ -82,11 +82,11 @@ func SetContext(ctx context.Context, options *config.Options, instanceName, clus foundCluster = true break } - return fmt.Errorf("pulsar cluster %s is not available", clusterName) + return fmt.Errorf("Pulsar cluster %s is not available", clusterName) } } if !foundCluster { - return fmt.Errorf("pulsar cluster %s not found", clusterName) + return fmt.Errorf("Pulsar cluster %s not found", clusterName) } clusterUID := string(*cluster.Metadata.Uid) diff --git a/pkg/mcp/streamnative_resources_log_tools.go b/pkg/mcp/streamnative_resources_log_tools.go index 28c1f88..e3d2a0f 100644 --- a/pkg/mcp/streamnative_resources_log_tools.go +++ b/pkg/mcp/streamnative_resources_log_tools.go @@ -241,7 +241,7 @@ func (o *LogOptions) getLogs(client *http.Client, position int64, if err != nil { return results, fmt.Errorf("failed to request logs (%s): %v", url, err) } - defer func() { _ = resp.Body.Close() }() + defer resp.Body.Close() var logResult LogResult var body []byte diff --git 
a/pkg/mcp/streamnative_resources_tools.go b/pkg/mcp/streamnative_resources_tools.go index b42af48..8798def 100644 --- a/pkg/mcp/streamnative_resources_tools.go +++ b/pkg/mcp/streamnative_resources_tools.go @@ -190,7 +190,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json if name != "" { // Try to get existing resource existingInstance, bdy, err := apiClient.CloudStreamnativeIoV1alpha1Api.ReadCloudStreamnativeIoV1alpha1NamespacedPulsarInstance(ctx, name, organization).Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() if err == nil { exists = true if existingInstance.Metadata != nil && existingInstance.Metadata.ResourceVersion != nil { @@ -223,7 +223,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() } else { verb = "created" // Create new resource @@ -233,7 +233,7 @@ func applyPulsarInstance(ctx context.Context, apiClient *sncloud.APIClient, json request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() } if err != nil { @@ -277,7 +277,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC if name != "" { // Try to get existing resource existingCluster, bdy, err := apiClient.CloudStreamnativeIoV1alpha1Api.ReadCloudStreamnativeIoV1alpha1NamespacedPulsarCluster(ctx, name, organization).Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() if err == nil { exists = true if existingCluster.Metadata != nil && existingCluster.Metadata.ResourceVersion != nil { @@ -311,7 +311,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC } _, bdy, err = request.Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() } else { verb = "created" // Create new resource @@ -321,7 +321,7 @@ func applyPulsarCluster(ctx context.Context, apiClient *sncloud.APIClient, jsonC request = request.DryRun(dryRunStr) } _, bdy, err = request.Execute() - defer func() { _ = bdy.Body.Close() }() + defer bdy.Body.Close() } if err != nil { diff --git a/pkg/pulsar/connection.go b/pkg/pulsar/connection.go index 1b43b95..578dff7 100644 --- a/pkg/pulsar/connection.go +++ b/pkg/pulsar/connection.go @@ -25,11 +25,10 @@ import ( ) const ( - // DefaultClientTimeout is the default timeout for Pulsar client operations. DefaultClientTimeout = 30 * time.Second ) -// PulsarContext holds configuration for connecting to a Pulsar cluster. 
+//nolint:revive type PulsarContext struct { ServiceURL string WebServiceURL string diff --git a/pkg/schema/avro_core.go b/pkg/schema/avro_core.go index 6d8da4d..be84a53 100644 --- a/pkg/schema/avro_core.go +++ b/pkg/schema/avro_core.go @@ -63,7 +63,7 @@ func avroFieldToMcpOption(field *avro.Field) (mcp.ToolOption, error) { } isRequired := true - underlyingTypeForDefault := fieldType // Used to check default value against non-union type + var underlyingTypeForDefault avro.Schema = fieldType // Used to check default value against non-union type if unionSchema, ok := fieldType.(*avro.UnionSchema); ok { isNullAble := false diff --git a/pkg/schema/avro_test.go b/pkg/schema/avro_test.go index 5a125a8..58e181c 100644 --- a/pkg/schema/avro_test.go +++ b/pkg/schema/avro_test.go @@ -349,11 +349,10 @@ func TestAvroConverter_SerializeMCPRequestToPulsarPayload(t *testing.T) { var schemaToUse string var argsToMarshal map[string]interface{} - switch tt.schemaInfo.Name { - case "SimpleAvroSerialize": + if tt.schemaInfo.Name == "SimpleAvroSerialize" { schemaToUse = simpleRecordSchema argsToMarshal = tt.args - case "ComplexAvroSerialize": + } else if tt.schemaInfo.Name == "ComplexAvroSerialize" { schemaToUse = complexRecordSchemaString complexArgsCopy := make(map[string]interface{}) for k, v := range tt.args { From 6e39f9fc91078051e4d879bab1e427ff06fc3632 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 13:37:38 +0800 Subject: [PATCH 08/13] ci(e2e): add cluster name to Kind setup and package doc --- .github/workflows/e2e.yaml | 2 ++ cmd/snmcp-e2e/main.go | 1 + 2 files changed, 3 insertions(+) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index fbeb483..9e9a029 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -35,6 +35,8 @@ jobs: - name: Set up Kind uses: helm/kind-action@v1 + with: + cluster_name: kind - name: Download dependencies run: go mod download diff --git a/cmd/snmcp-e2e/main.go b/cmd/snmcp-e2e/main.go index e178077..43d2f08 100644 --- a/cmd/snmcp-e2e/main.go +++ b/cmd/snmcp-e2e/main.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package main is the entry point for the StreamNative MCP E2E test. 
package main import ( From 0c426617f9de6b54bb14a0fabbee18d5283e9d72 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 14:47:26 +0800 Subject: [PATCH 09/13] ci(e2e): simplify E2E workflow by consolidating steps --- .github/workflows/e2e.yaml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 9e9a029..34bf4de 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -41,17 +41,8 @@ jobs: - name: Download dependencies run: go mod download - - name: Start Pulsar - run: ./scripts/e2e-test.sh setup-pulsar - - - name: Build and load snmcp image - run: ./scripts/e2e-test.sh build-image - - - name: Deploy snmcp - run: ./scripts/e2e-test.sh deploy-mcp - - name: Run E2E tests - run: ./scripts/e2e-test.sh run-tests + run: ./scripts/e2e-test.sh all - name: Cleanup if: always() From 6a0d9c1cc4f23df1308fc04ef1b139063b7ec612 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 22:56:00 +0800 Subject: [PATCH 10/13] Update charts/snmcp/templates/configmap.yaml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- charts/snmcp/templates/configmap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/snmcp/templates/configmap.yaml b/charts/snmcp/templates/configmap.yaml index b355b8e..9d273a0 100644 --- a/charts/snmcp/templates/configmap.yaml +++ b/charts/snmcp/templates/configmap.yaml @@ -9,7 +9,7 @@ metadata: labels: {{- include "snmcp.labels" . | nindent 4 }} data: - SNMCP_PULSAR_WEB_SERVICE_URL: {{ .Values.pulsar.webServiceURL | quote }} + SNMCP_PULSAR_WEB_SERVICE_URL: {{ required "pulsar.webServiceURL is required" .Values.pulsar.webServiceURL | quote }} {{- if .Values.pulsar.serviceURL }} SNMCP_PULSAR_SERVICE_URL: {{ .Values.pulsar.serviceURL | quote }} {{- end }} From 7dc89cca712768dbf9e1cc20f679a4b5cd221b1f Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 22:56:34 +0800 Subject: [PATCH 11/13] Update pkg/cmd/mcp/sse.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- pkg/cmd/mcp/sse.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/mcp/sse.go b/pkg/cmd/mcp/sse.go index b08e4ca..1540abd 100644 --- a/pkg/cmd/mcp/sse.go +++ b/pkg/cmd/mcp/sse.go @@ -256,12 +256,14 @@ func joinHTTPPath(basePath string, suffix string) string { func healthHandler(status string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { + if r.Method != http.MethodGet && r.Method != http.MethodHead { w.WriteHeader(http.StatusMethodNotAllowed) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(status)) + if r.Method == http.MethodGet { + _, _ = w.Write([]byte(status)) + } } } From 03a7a437b4f3ccfb47f31e1448fe6abd6885b700 Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Mon, 12 Jan 2026 23:32:24 +0800 Subject: [PATCH 12/13] docs(helm): clarify configDir must be writable --- charts/snmcp/README.md | 1 + charts/snmcp/templates/deployment.yaml | 1 + charts/snmcp/values.yaml | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/charts/snmcp/README.md b/charts/snmcp/README.md index 592a782..332b8ef 100644 --- a/charts/snmcp/README.md +++ b/charts/snmcp/README.md @@ -31,6 +31,7 @@ helm install snmcp ./charts/snmcp \ | `server.features` | `[]` | Features to enable (default: all-pulsar) | | `server.httpAddr` | `:9090` | HTTP server address | | `server.httpPath` | `/mcp` | Base 
path for SSE/message/health endpoints | +| `server.configDir` | `/var/lib/snmcp` | Config directory for snmcp state (must be writable) | ### Session Configuration diff --git a/charts/snmcp/templates/deployment.yaml b/charts/snmcp/templates/deployment.yaml index 31474a0..cc22c88 100644 --- a/charts/snmcp/templates/deployment.yaml +++ b/charts/snmcp/templates/deployment.yaml @@ -111,6 +111,7 @@ spec: {{- if .Values.server.configDir }} - name: config-dir mountPath: {{ .Values.server.configDir | quote }} + readOnly: false {{- end }} {{- if and .Values.pulsar.tls.enabled .Values.pulsar.tls.secretName }} - name: pulsar-tls diff --git a/charts/snmcp/values.yaml b/charts/snmcp/values.yaml index 6c642d9..6246fc5 100644 --- a/charts/snmcp/values.yaml +++ b/charts/snmcp/values.yaml @@ -22,7 +22,7 @@ server: httpAddr: ":9090" # HTTP server base path for SSE, message, and health endpoints httpPath: "/mcp" - # Config directory for snmcp state + # Config directory for snmcp state (must be writable) configDir: "/var/lib/snmcp" # Pulsar cluster configuration (required) From af7151df8bbee11d225faf7bd9191ecc8464c99e Mon Sep 17 00:00:00 2001 From: Rui Fu Date: Tue, 13 Jan 2026 12:05:22 +0800 Subject: [PATCH 13/13] refactor(helm): remove hardcoded httpAddr and clarify port usage --- charts/snmcp/README.md | 3 ++- charts/snmcp/templates/configmap.yaml | 1 - charts/snmcp/templates/deployment.yaml | 2 -- charts/snmcp/values.yaml | 3 +-- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/charts/snmcp/README.md b/charts/snmcp/README.md index 332b8ef..2078146 100644 --- a/charts/snmcp/README.md +++ b/charts/snmcp/README.md @@ -29,10 +29,11 @@ helm install snmcp ./charts/snmcp \ |-----------|---------|-------------| | `server.readOnly` | `false` | Enable read-only mode | | `server.features` | `[]` | Features to enable (default: all-pulsar) | -| `server.httpAddr` | `:9090` | HTTP server address | | `server.httpPath` | `/mcp` | Base path for SSE/message/health endpoints | | `server.configDir` | `/var/lib/snmcp` | Config directory for snmcp state (must be writable) | +The container listens on port 9090. Use `service.port` or Ingress to expose a different port. 
+ ### Session Configuration | Parameter | Default | Description | diff --git a/charts/snmcp/templates/configmap.yaml b/charts/snmcp/templates/configmap.yaml index 9d273a0..8bab261 100644 --- a/charts/snmcp/templates/configmap.yaml +++ b/charts/snmcp/templates/configmap.yaml @@ -15,7 +15,6 @@ data: {{- end }} SNMCP_SESSION_CACHE_SIZE: {{ .Values.session.cacheSize | quote }} SNMCP_SESSION_TTL_MINUTES: {{ .Values.session.ttlMinutes | quote }} - SNMCP_HTTP_ADDR: {{ .Values.server.httpAddr | quote }} SNMCP_HTTP_PATH: {{ .Values.server.httpPath | quote }} SNMCP_CONFIG_DIR: {{ .Values.server.configDir | quote }} SNMCP_READ_ONLY: {{ .Values.server.readOnly | quote }} diff --git a/charts/snmcp/templates/deployment.yaml b/charts/snmcp/templates/deployment.yaml index cc22c88..d702586 100644 --- a/charts/snmcp/templates/deployment.yaml +++ b/charts/snmcp/templates/deployment.yaml @@ -50,8 +50,6 @@ spec: - "$(SNMCP_SESSION_CACHE_SIZE)" - --session-ttl-minutes - "$(SNMCP_SESSION_TTL_MINUTES)" - - --http-addr - - "$(SNMCP_HTTP_ADDR)" - --http-path - "$(SNMCP_HTTP_PATH)" {{- if .Values.server.readOnly }} diff --git a/charts/snmcp/values.yaml b/charts/snmcp/values.yaml index 6246fc5..6085996 100644 --- a/charts/snmcp/values.yaml +++ b/charts/snmcp/values.yaml @@ -18,8 +18,6 @@ server: # Features to enable (e.g., "all-pulsar", "pulsar-admin", "pulsar-client") # Leave empty to use default (all-pulsar) features: [] - # HTTP server address - httpAddr: ":9090" # HTTP server base path for SSE, message, and health endpoints httpPath: "/mcp" # Config directory for snmcp state (must be writable) @@ -58,6 +56,7 @@ session: # Kubernetes Service configuration service: type: ClusterIP + # Port exposed by the Service (container listens on 9090) port: 9090 annotations: {}
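
Taken together, the Helm-related patches in this series (PATCH 10/13, 12/13, and 13/13) leave the chart expecting a required `pulsar.webServiceURL`, a writable `server.configDir`, and a container that always listens on port 9090 and is exposed through `service.port`. A minimal values override consistent with those fields might look like the sketch below; the field names are taken from `charts/snmcp/values.yaml` and `configmap.yaml` as they appear in this series, while the endpoint URLs, the override filename, and the 8080 Service port are illustrative placeholders rather than chart defaults.

```yaml
# values-override.yaml -- example override for the snmcp chart (hypothetical filename)
server:
  readOnly: false
  httpPath: "/mcp"              # base path for SSE/message/health endpoints
  configDir: "/var/lib/snmcp"   # must be writable (see PATCH 12/13)

pulsar:
  webServiceURL: "https://pulsar.example.com:443"      # required by configmap.yaml (placeholder URL)
  serviceURL: "pulsar+ssl://pulsar.example.com:6651"   # optional (placeholder URL)

service:
  type: ClusterIP
  port: 8080                    # Service port; the container itself still listens on 9090
```

Such an override could then be applied with something like `helm install snmcp ./charts/snmcp -f values-override.yaml`.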