diff --git a/.github/workflows/chart-lint-publish.yml b/.github/workflows/chart-lint-publish.yml new file mode 100644 index 0000000..a744ab5 --- /dev/null +++ b/.github/workflows/chart-lint-publish.yml @@ -0,0 +1,62 @@ +name: Validate / Publish helm charts + +on: + release: + types: [published] + pull_request: + types: [opened, reopened, synchronize] + paths: + - 'helm/**' + workflow_dispatch: + inputs: + IGNORE_CHARTS: + description: 'Provide list of charts to be ignored separated by pipe(|)' + required: false + default: '""' + type: string + CHART_PUBLISH: + description: 'Chart publishing to gh-pages branch' + required: false + default: 'NO' + type: string + options: + - YES + - NO + INCLUDE_ALL_CHARTS: + description: 'Include all charts for Linting/Publishing (YES/NO)' + required: false + default: 'NO' + type: string + options: + - YES + - NO + push: + branches: + - '!release-branch' + - '!master' + - 1.* + - 0.* + - develop + - MOSIP* + - release* + paths: + - 'helm/**' + +jobs: + chart-lint-publish: + uses: mosip/kattu/.github/workflows/chart-lint-publish.yml@master + with: + CHARTS_DIR: ./helm + CHARTS_URL: https://mosip.github.io/mosip-helm + REPOSITORY: mosip-helm + BRANCH: gh-pages + INCLUDE_ALL_CHARTS: "${{ inputs.INCLUDE_ALL_CHARTS || 'NO' }}" + IGNORE_CHARTS: "${{ inputs.IGNORE_CHARTS || '\"\"' }}" + CHART_PUBLISH: "${{ inputs.CHART_PUBLISH || 'YES' }}" + LINTING_CHART_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-schema.yaml" + LINTING_LINTCONF_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/lintconf.yaml" + LINTING_CHART_TESTING_CONFIG_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-testing-config.yaml" + LINTING_HEALTH_CHECK_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/health-check-schema.yaml" + secrets: + TOKEN: ${{ secrets.ACTION_PAT }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} diff --git a/.github/workflows/push-trigger.yml b/.github/workflows/push-trigger.yml new file mode 100644 index 0000000..4ffaf02 --- /dev/null +++ b/.github/workflows/push-trigger.yml @@ -0,0 +1,47 @@ +name: Building Security Tools + +on: + release: + types: [published] + pull_request: + types: [opened, reopened, synchronize] + workflow_dispatch: + inputs: + message: + description: 'Message for manually triggering' + required: false + default: 'Triggered for Updates' + type: string + push: + branches: + - master + - 1.* + - develop* + - release* + - MOSIP* + - update + +jobs: + build-dockers: + strategy: + matrix: + include: + - SERVICE_LOCATION: 'databreachdetector' + SERVICE_NAME: 'databreachdetector' + - SERVICE_LOCATION: 'certmanager' + SERVICE_NAME: 'certmanager' + - SERVICE_LOCATION: 'auditsweeper' + SERVICE_NAME: 'auditsweeper' + ONLY_DOCKER: true + fail-fast: false + name: ${{ matrix.SERVICE_NAME }} + uses: mosip/kattu/.github/workflows/docker-build.yml@master + with: + SERVICE_LOCATION: ${{ matrix.SERVICE_LOCATION }} + SERVICE_NAME: ${{ matrix.SERVICE_NAME }} + ONLY_DOCKER: ${{ matrix.ONLY_DOCKER }} + secrets: + DEV_NAMESPACE_DOCKER_HUB: ${{ secrets.DEV_NAMESPACE_DOCKER_HUB }} + ACTOR_DOCKER_HUB: ${{ secrets.ACTOR_DOCKER_HUB }} + RELEASE_DOCKER_HUB: ${{ secrets.RELEASE_DOCKER_HUB }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_DEVOPS }} diff --git a/.github/workflows/sonar-check.yml b/.github/workflows/sonar-check.yml new file mode 100644 index 0000000..992cf1c --- /dev/null +++ 
b/.github/workflows/sonar-check.yml
@@ -0,0 +1,41 @@
+name: SonarCloud PR Quality Gate
+
+on:
+  push:
+    branches:
+      - develop
+
+jobs:
+  sonar_analysis:
+    name: maven-sonar-analysis
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up JDK 21
+        uses: actions/setup-java@v4
+        with:
+          java-version: 21
+          distribution: 'temurin'
+
+      - name: Cache SonarCloud packages
+        uses: actions/cache@v4
+        with:
+          path: ~/.sonar/cache
+          key: ${{ runner.os }}-sonar
+          restore-keys: ${{ runner.os }}-sonar
+
+      - name: Cache Maven packages
+        uses: actions/cache@v4
+        with:
+          path: ~/.m2
+          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+          restore-keys: ${{ runner.os }}-m2
+
+      - name: Run SonarCloud analysis
+        env:
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+        run: |
+          mvn -B verify sonar:sonar -Dsonar.projectKey=mosip_security-tools -Dsonar.organization=mosip -Dsonar.host.url=https://sonarcloud.io -DskipSigning=true
diff --git a/README.md b/README.md
deleted file mode 100644
index 1f09f06..0000000
--- a/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# security-tools
-Repository containing required security tool's code for MOSIP.
diff --git a/auditsweeper/Dockerfile b/auditsweeper/Dockerfile
new file mode 100644
index 0000000..12dc8f3
--- /dev/null
+++ b/auditsweeper/Dockerfile
@@ -0,0 +1,39 @@
+FROM python:3.9
+
+ARG SOURCE
+ARG COMMIT_HASH
+ARG COMMIT_ID
+ARG BUILD_TIME
+LABEL source=${SOURCE}
+LABEL commit_hash=${COMMIT_HASH}
+LABEL commit_id=${COMMIT_ID}
+LABEL build_time=${BUILD_TIME}
+
+ARG container_user=mosip
+ARG container_user_group=mosip
+ARG container_user_uid=1001
+ARG container_user_gid=1001
+
+# Create the user and set the working directory
+RUN groupadd -r ${container_user_group} && useradd -u ${container_user_uid} -r -g ${container_user_group} -s /bin/bash -m -d /home/${container_user} ${container_user}
+
+WORKDIR /home/${container_user}
+
+# Add all files to the correct working directory
+ADD . .
+
+# Install kubectl and Python dependencies
+RUN apt-get -y update && apt-get install -y curl \
+&& curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
+&& chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl \
+&& pip install --no-cache-dir -r requirements.txt \
+&& chown -R ${container_user}:${container_user_group} /home/${container_user}
+
+USER ${container_user}
+
+ENV db-host=
+ENV db-port=
+ENV db-su-user=
+ENV postgres-password=
+
+CMD ["python", "auditsweeper.py"]
\ No newline at end of file
diff --git a/auditsweeper/auditsweeper.py b/auditsweeper/auditsweeper.py
new file mode 100644
index 0000000..91de6ab
--- /dev/null
+++ b/auditsweeper/auditsweeper.py
@@ -0,0 +1,87 @@
+import os
+import sys
+import configparser
+import psycopg2
+
+# This script performs a cleanup of old log entries from a PostgreSQL database.
+# It is designed to be run as a Docker container via a cron job.
+
+def get_db_credentials():
+    """
+    Attempts to get database credentials from environment variables.
+    If not found, falls back to a local.properties file.
+    """
+    # List of required variables
+    required_vars = [
+        "db-host", "db-port", "db-su-user",
+        "postgres-password", "log-age-days"
+    ]
+
+    env_vars = {var: os.getenv(var) for var in required_vars}
+
+    # Check if all environment variables are set
+    if all(env_vars.values()):
+        print("Using credentials from environment variables.")
+        return env_vars
+    else:
+        print("One or more required environment variables are not set. 
Checking for local.properties...") + config = configparser.ConfigParser() + config_file = "local.properties" + + if not os.path.exists(config_file): + print(f"Error: Required variables not set and '{config_file}' not found.") + sys.exit(1) + + try: + # Read the properties file, assuming a single section + config.read_string(f"[DEFAULT]\n{open(config_file).read()}") + props = config['DEFAULT'] + + # Populate variables from the properties file + return {var: props.get(var) for var in required_vars} + except configparser.Error as e: + print(f"Error reading local.properties file: {e}") + sys.exit(1) + +def cleanup_db(config): + """ + Connects to the database and performs the cleanup operation. + """ + db_name = "mosip_audit" + try: + conn = psycopg2.connect( + host=config["db-host"], + port=config["db-port"], + user=config["db-su-user"], + password=config["postgres-password"], + dbname=db_name + ) + cur = conn.cursor() + + print(f"Starting database cleanup for logs older than {config['log-age-days']} days...") + print(f"Connecting to DB: {config['db-su-user']}@{config['db-host']}:{config['db-port']}/{db_name}") + + # The core DELETE command + # Use a parameterized query for safety + delete_query = "DELETE FROM audit.app_audit_log WHERE log_dtimes < NOW() - INTERVAL %s" + interval_str = f"{config['log-age-days']} days" + + cur.execute(delete_query, (interval_str,)) + + # Get the number of rows deleted + rows_deleted = cur.rowcount + conn.commit() + + print(f"Successfully deleted {rows_deleted} rows.") + + except psycopg2.OperationalError as e: + print(f"Database connection or query failed: {e}") + sys.exit(1) + finally: + if 'conn' in locals() and conn: + conn.close() + +if __name__ == "__main__": + db_config = get_db_credentials() + cleanup_db(db_config) + print("Database cleanup script finished successfully.") \ No newline at end of file diff --git a/auditsweeper/local.properties b/auditsweeper/local.properties new file mode 100644 index 0000000..984fda7 --- /dev/null +++ b/auditsweeper/local.properties @@ -0,0 +1,5 @@ +db-host=postgres.dev1.mosip.net +db-port=5432 +db-su-user=postgres +postgres-password=HEdM***9ZXir7Tu2F +log-age-days=85 \ No newline at end of file diff --git a/auditsweeper/requirements.txt b/auditsweeper/requirements.txt new file mode 100644 index 0000000..ee92c0e --- /dev/null +++ b/auditsweeper/requirements.txt @@ -0,0 +1 @@ +psycopg2-binary==2.9.1 \ No newline at end of file diff --git a/certmanager/Dockerfile b/certmanager/Dockerfile new file mode 100644 index 0000000..cbfef22 --- /dev/null +++ b/certmanager/Dockerfile @@ -0,0 +1,46 @@ +FROM python:3.9 + +ARG SOURCE +ARG COMMIT_HASH +ARG COMMIT_ID +ARG BUILD_TIME +LABEL source=${SOURCE} +LABEL commit_hash=${COMMIT_HASH} +LABEL commit_id=${COMMIT_ID} +LABEL build_time=${BUILD_TIME} + +ARG container_user=mosip +ARG container_user_group=mosip +ARG container_user_uid=1001 +ARG container_user_gid=1001 + +# Install kubectl binary +RUN apt-get -y update \ + && apt-get install -y curl \ + && curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl + +# Create user group +RUN groupadd -r ${container_user_group} && useradd -u ${container_user_uid} -r -g ${container_user_group} -s /bin/bash -m -d /home/${container_user} ${container_user} +RUN chown -R ${container_user}:${container_user} /home/${container_user} +WORKDIR /home/${container_user} +USER ${container_user} + +ENV MYDIR=`pwd` +ENV DATE="$(date --utc 
+%FT%T.%3NZ)"
+ENV ENABLE_INSECURE=false
+ENV MODULE=
+
+
+ENV db-host=
+ENV db-port=
+ENV db-su-user=
+ENV postgres-password=
+ENV ns_esignet=esignet
+
+COPY partner.properties .
+COPY requirements.txt .
+COPY checkupdate.py .
+COPY bootstrap.properties .
+RUN pip install --no-cache-dir -r requirements.txt
+CMD ["python", "checkupdate.py"]
\ No newline at end of file
diff --git a/certmanager/README.md b/certmanager/README.md
new file mode 100644
index 0000000..a342c40
--- /dev/null
+++ b/certmanager/README.md
@@ -0,0 +1,86 @@
+# Certificate Renewal Script for MOSIP
+
+This script automates checking and renewing certificates for MOSIP's partners.
+
+## Features
+
+* The script reads partner IDs from either:
+
+  * the `PARTNER_IDS_ENV` environment variable when running in a Kubernetes cluster, or
+  * the `partner.properties` file when running locally.
+
+* Checks certificate expiry and renews a certificate if it is expired or within the `pre-expiry-days` window.
+* Uploads renewed certificates to PMS and propagates them to dependent systems:
+
+  * **eSignet** for `mpartner-default-esignet`
+  * **IDA** for `mpartner-default-auth`
+  * **KeyManager** for `mpartner-default-resident` and `mpartner-default-digitalcard`
+  * **PMS** for all other partner types (including external third-party partners)
+* Logs actions and results for each partner.
+
+## Configuration
+
+The script reads configuration values from environment variables or the `bootstrap.properties` file.
+
+### Environment Variables (preferred)
+
+| Variable Name                    | Description                                |
+| -------------------------------- | ------------------------------------------ |
+| `db-host`                        | PostgreSQL host                            |
+| `db-port`                        | PostgreSQL port                            |
+| `db-su-user`                     | PostgreSQL superuser                       |
+| `postgres-password`              | PostgreSQL password                        |
+| `mosip-api-internal-host`        | Internal MOSIP API base host               |
+| `mosip_deployment_client_secret` | MOSIP PMS client secret for authentication |
+| `pre-expiry-days`                | Days before expiry to trigger renewal      |
+| `PARTNER_IDS_ENV`                | Comma-separated partner IDs to process     |
+
+### bootstrap.properties (fallback)
+
+Provide the same keys as above in `bootstrap.properties` if environment variables are not set.
+
+Example:
+
+```
+db-host=localhost
+db-port=5432
+db-su-user=postgres
+postgres-password=postgres
+mosip-api-internal-host=api-internal.mosip.net
+mosip_deployment_client_secret=secret-key
+pre-expiry-days=30
+```
+
+### partner.properties
+
+List of sample partner IDs to process when `PARTNER_IDS_ENV` is not set:
+
+```
+PARTNER_ID=mpartner-default-auth,mpartner-default-esignet,mpartner-default-resident
+```
+
+## Running the Script
+
+### Python (local)
+
+```bash
+python checkupdate.py
+```
+
+## Outputs
+
+* Logs the certificate renewal process to stdout.
+* Writes expired partner IDs to `expired.txt`.
+* Automatically uploads renewed certificates to the appropriate systems.
+
+## Notes
+
+* Ensure the PostgreSQL credentials are valid and the MOSIP API host is reachable.
+* Certificate expiry is checked with OpenSSL; renewal occurs when a certificate is expired or expires within the configured pre-expiry window.
+* The script prints detailed progress and failures for each step.
+
+## WIP
+* The script cannot yet handle IDA-CRED certificates; the team is working on a fix.
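For reference, below is a minimal sketch of the expiry-window check described above. It is illustrative only: it uses the `cryptography` package rather than the OpenSSL subprocess call that `checkupdate.py` makes, and the 40-day window and PEM placeholder are assumed values.

```python
from datetime import datetime
from cryptography import x509

def days_until_expiry(cert_pem: str) -> int:
    """Parse a PEM certificate and return the days left until its notAfter date."""
    cert = x509.load_pem_x509_certificate(cert_pem.encode())
    return (cert.not_valid_after - datetime.utcnow()).days

# Illustrative values only; supply a real PEM certificate and the configured window.
pre_expiry_days = 40
cert_pem = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

days_left = days_until_expiry(cert_pem)
if days_left <= pre_expiry_days:
    print(f"Certificate expires in {days_left} day(s); renewal needed.")
else:
    print(f"Certificate is valid for {days_left} more day(s).")
```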
diff --git a/certmanager/bootstrap.properties b/certmanager/bootstrap.properties new file mode 100644 index 0000000..8391d3d --- /dev/null +++ b/certmanager/bootstrap.properties @@ -0,0 +1,10 @@ +[Database] +db-host = postgres.sandbox.mosip.net +db-port = 5432 +db-su-user = postgres +postgres-password = HEdMa9Z****Tu** +[API] +mosip-api-internal-host=api-internal.sandbox.mosip.net +mosip_deployment_client_secret=w9xp2****Q4N1709B +pre-expiry-days=40 +mosip-api-external-host=api.sandbox.mosip.net diff --git a/certmanager/checkupdate.py b/certmanager/checkupdate.py new file mode 100644 index 0000000..ef3275d --- /dev/null +++ b/certmanager/checkupdate.py @@ -0,0 +1,271 @@ +import os +import json +import psycopg2 +import requests +import subprocess +from urllib.request import Request, urlopen +from urllib.error import HTTPError +from datetime import datetime, timedelta, timezone +from configparser import ConfigParser + +# Function to read value from bootstrap.properties +def read_bootstrap_properties(key): + with open('bootstrap.properties', 'r') as file: + for line in file: + if line.startswith(key): + return line.split('=')[1].strip() + return None + +# Function to check if certificate is expired +def is_certificate_expired(expiration_date): + expiration_date = datetime.strptime(expiration_date, "%b %d %H:%M:%S %Y %Z") + current_date = datetime.utcnow() + return current_date > expiration_date + +# Function to write expired certificates to a text file +def write_to_expired_txt(cert_name): + with open('expired.txt', 'a') as file: + file.write(cert_name + '\n') + +# Function to format certificate data +def format_certificate(cert_data): + return cert_data.replace("\n", "\\n") + +# Function to retrieve certificate data from the database +def retrieve_certificate_data(partner_id, db_host, db_port, db_user, db_password): + try: + pms_conn = psycopg2.connect( + host=db_host, + port=db_port, + database="mosip_pms", + user=db_user, + password=db_password + ) + pms_cursor = pms_conn.cursor() + sql_query_cert_alias = f"SELECT certificate_alias FROM pms.partner WHERE id = '{partner_id}';" + pms_cursor.execute(sql_query_cert_alias) + certificate_alias = pms_cursor.fetchone()[0] + + sql_query_cert_data = f"SELECT cert_data FROM keymgr.partner_cert_store WHERE cert_id = '{certificate_alias}';" + keymgr_conn = psycopg2.connect( + host=db_host, + port=db_port, + database="mosip_keymgr", + user=db_user, + password=db_password + ) + keymgr_cursor = keymgr_conn.cursor() + keymgr_cursor.execute(sql_query_cert_data) + cert_data = keymgr_cursor.fetchone()[0] + + formatted_cert_data = format_certificate(cert_data) + + pms_cursor.close() + pms_conn.close() + keymgr_cursor.close() + keymgr_conn.close() + + return formatted_cert_data + except Exception as e: + print(f"Error retrieving certificate data for Partner ID '{partner_id}': {str(e)}") + return None + +# Function to get current UTC time in ISO 8601 format with milliseconds +def get_utc_timestamp(): + return datetime.utcnow().replace(tzinfo=timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z') + +# Function to authenticate and retrieve the token +def authenticate_and_get_token(base_url, client_secret): + auth_url = f"https://{base_url}/v1/authmanager/authenticate/clientidsecretkey" + headers = {"Content-Type": "application/json"} + auth_data = { + "id": "string", + "metadata": {}, + "request": { + "appId": "ida", + "clientId": "mosip-deployment-client", + "secretKey": client_secret + }, + "requesttime": get_utc_timestamp(), + "version": 
"string" + } + response = requests.post(auth_url, headers=headers, json=auth_data) + if response.status_code == 200: + return response.headers.get("authorization") + print("Authentication failed.") + return None + +# Function to upload certificate +# Returns signedCertificateData if successful +def upload_certificate_with_token(token, cert_data, partner_id, base_url): + upload_url = f"https://{base_url}/v1/partnermanager/partners/certificate/upload" + headers = {"Content-Type": "application/json", "Cookie": f"Authorization={token}"} + partner_domain = "MISP" if partner_id == "mpartner-default-esignet" else "AUTH" + upload_data = { + "id": "string", + "metadata": {}, + "request": { + "certificateData": cert_data.replace("\\n", "\n"), + "partnerDomain": partner_domain, + "partnerId": partner_id + }, + "requesttime": get_utc_timestamp(), + "version": "string" + } + response = requests.post(upload_url, headers=headers, json=upload_data) + if "certificateId" not in response.text: + print(f"[{partner_id}] Certificate renewal failed.") + return None + return json.loads(response.text)['response']['signedCertificateData'] + +# Function to post-upload to dependent systems +def post_upload_to_system(endpoint, token, app_id, cert_data, reference_id, bearer=False): + if bearer: + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {token}" + } + else: + headers = { + "Content-Type": "application/json", + "Cookie": f"Authorization={token}" + } + + payload = { + "request": { + "certificateData": cert_data, + "applicationId": app_id, + "referenceId": reference_id + }, + "requestTime": get_utc_timestamp() + } + + response = requests.post(endpoint, headers=headers, json=payload) + + if response.status_code == 200 and 'Upload Success' in response.text: + print(f"[{partner_id}] certificate uploaded back to [{app_id}] successfully.") + return True + else: + print(f"[{partner_id}] certificate upload back to [{app_id}] failed.") + return False + +# Load configuration +postgres_host = os.environ.get('db-host') or read_bootstrap_properties('db-host') +postgres_port = os.environ.get('db-port') or read_bootstrap_properties('db-port') +postgres_user = os.environ.get('db-su-user') or read_bootstrap_properties('db-su-user') +postgres_password = os.environ.get('postgres-password') or read_bootstrap_properties('postgres-password') +base_url = os.environ.get('mosip-api-internal-host') or read_bootstrap_properties('mosip-api-internal-host') +base_esignet_url = os.environ.get('mosip-api-host') or read_bootstrap_properties('mosip-api-external-host') +client_secret = os.environ.get('mosip_deployment_client_secret') or read_bootstrap_properties('mosip_deployment_client_secret') +pre_expiry_days = int(os.environ.get('pre-expiry-days') or read_bootstrap_properties('pre-expiry-days')) +ns_esignet = os.environ.get('ns_esignet') +TOKEN = authenticate_and_get_token(base_url, client_secret) + +if TOKEN: + partner_ids = os.environ.get('PARTNER_IDS_ENV') + if partner_ids: + partner_ids = partner_ids.split(',') + print("Getting list of partners from env variable") + else: + with open('partner.properties', 'r') as file: + for line in file: + if line.startswith('PARTNER_ID'): + partner_ids = line.strip().split('=')[1].split(',') + print("Getting list of partners from local variable") + + for PARTNER_ID in partner_ids: + PARTNER_ID = PARTNER_ID.strip() + print(f"\nProcessing partner ID: {PARTNER_ID}") + try: + req = Request( + f"https://{base_url}/v1/partnermanager/partners/{PARTNER_ID}/certificate", + 
headers={"Content-Type": "application/json", "Cookie": f"Authorization={TOKEN}"}, + method="GET" + ) + response = urlopen(req) + raw_data = response.read().decode('utf-8') + try: + response_data = json.loads(raw_data) + except json.JSONDecodeError: + print(f"[{PARTNER_ID}] Invalid JSON response.") + continue + + if not response_data or not isinstance(response_data, dict): + print(f"[{PARTNER_ID}] Invalid or empty response.") + continue + + cert_info = response_data.get('response') + CERTIFICATE_DATA = cert_info.get('certificateData') if cert_info else None + + if not CERTIFICATE_DATA: + print(f"[{PARTNER_ID}] Certificate data not found.") + continue + + expiration_date = os.popen(f"echo '{CERTIFICATE_DATA}' | openssl x509 -noout -enddate").read().split('=')[1].strip() + expiry_dt = datetime.strptime(expiration_date, "%b %d %H:%M:%S %Y %Z") + days_left = (expiry_dt - datetime.utcnow()).days + + if is_certificate_expired(expiration_date) or days_left <= int(pre_expiry_days): + print(f"[{PARTNER_ID}] Certificate is expired or will expire in {days_left} day(s). Renewing...") + write_to_expired_txt(PARTNER_ID) + else: + print(f"[{PARTNER_ID}] Certificate is valid. {days_left} day(s) left.") + + except HTTPError as e: + print(f"[{PARTNER_ID}] HTTP error while fetching certificate: {e}") + continue + except Exception as e: + print(f"[{PARTNER_ID}] Unexpected error: {e}") + continue + + if os.path.exists("expired.txt"): + with open("expired.txt", "r") as file: + expired_partner_ids = [line.strip() for line in file if line.strip()] + else: + expired_partner_ids = [] + + for partner_id in expired_partner_ids: + cert_data = retrieve_certificate_data(partner_id, postgres_host, postgres_port, postgres_user, postgres_password) + if not cert_data: + continue + + try: + pem = cert_data.replace("\\n", "\n") + end_date_str = os.popen(f"echo '{pem}' | openssl x509 -noout -enddate").read().split('=')[1].strip() + end_date = datetime.strptime(end_date_str, "%b %d %H:%M:%S %Y %Z") + if (end_date - datetime.utcnow()).days < 365: + print(f"DB cert for {partner_id} has less than 365 days left. Skipping.") + continue + except Exception as e: + print(f"Error validating DB cert for {partner_id}: {e}") + continue + + signed_cert = upload_certificate_with_token(TOKEN, cert_data, partner_id, base_url) + if not signed_cert: + continue + + # Post-upload to relevant systems + success = True + if partner_id == 'mpartner-default-esignet': + success = post_upload_to_system(f"https://{base_esignet_url}/v1/esignet/system-info/uploadCertificate", TOKEN, "OIDC_PARTNER", signed_cert, "", bearer=True) + if success: + if ns_esignet: + subprocess.run(["kubectl", "rollout", "restart", "deployment", "esignet", "-n", ns_esignet], check=True) + else: + print("Environment variable 'ns_esignet' not set. Cannot restart esignet deployment.") + else: + print(f"[{partner_id}] Upload to Esignet failed. 
Skipping restart.") + elif partner_id == 'mpartner-default-digitalcard': + success = post_upload_to_system(f"https://{base_url}/v1/keymanager/uploadCertificate", TOKEN, "DIGITAL_CARD", signed_cert, partner_id) + elif partner_id == 'mpartner-default-auth': + success = post_upload_to_system(f"https://{base_url}/idauthentication/v1/internal/uploadCertificate", TOKEN, "IDA", signed_cert, partner_id) + elif partner_id == 'mpartner-default-resident': + success = post_upload_to_system(f"https://{base_url}/v1/keymanager/uploadCertificate", TOKEN, "RESIDENT", signed_cert, partner_id) + + if success or partner_id not in [ + 'mpartner-default-esignet', 'mpartner-default-digitalcard', 'mpartner-default-auth', 'mpartner-default-resident']: + print(f"[{partner_id}] certificate renewed successfully and will be valid for 1 more year.") + + print("MOSIP Certificate Manager Run Completed.") +else: + print("Failed to get auth-token") diff --git a/certmanager/partner.properties b/certmanager/partner.properties new file mode 100644 index 0000000..cc3cba8 --- /dev/null +++ b/certmanager/partner.properties @@ -0,0 +1 @@ +PARTNER_ID=mpartner-default-resident,mpartner-default-abis \ No newline at end of file diff --git a/certmanager/requirements.txt b/certmanager/requirements.txt new file mode 100644 index 0000000..33678ea --- /dev/null +++ b/certmanager/requirements.txt @@ -0,0 +1,2 @@ +psycopg2-binary==2.9.1 +requests==2.26.0 \ No newline at end of file diff --git a/databreachdetector/Dockerfile b/databreachdetector/Dockerfile new file mode 100644 index 0000000..61a7fa5 --- /dev/null +++ b/databreachdetector/Dockerfile @@ -0,0 +1,42 @@ +FROM python:3.9 + +ARG SOURCE +ARG COMMIT_HASH +ARG COMMIT_ID +ARG BUILD_TIME +LABEL source=${SOURCE} +LABEL commit_hash=${COMMIT_HASH} +LABEL commit_id=${COMMIT_ID} +LABEL build_time=${BUILD_TIME} + +ARG container_user=mosip +ARG container_user_group=mosip +ARG container_user_uid=1001 +ARG container_user_gid=1001 + +# Create user group +RUN groupadd -r ${container_user_group} && useradd -u ${container_user_uid} -r -g ${container_user_group} -s /bin/bash -m -d /home/${container_user} ${container_user} +RUN chown -R ${container_user}:${container_user} /home/${container_user} +WORKDIR /home/${container_user} +USER ${container_user} + +ENV MYDIR=`pwd` +ENV DATE="$(date --utc +%FT%T.%3NZ)" +ENV ENABLE_INSECURE=false +ENV MODULE= + +ENV s3-host= +ENV s3-region= +ENV s3-user-key= +ENV s3-user-secret= +ENV s3-bucket-name= +ENV db-server= +ENV db-port= +ENV db-su-user= +ENV postgres-password= + +COPY requirements.txt . +COPY db.properties . +RUN pip install --no-cache-dir -r requirements.txt +COPY databreachdetector.py . +CMD ["python", "databreachdetector.py"] \ No newline at end of file diff --git a/databreachdetector/README.md b/databreachdetector/README.md new file mode 100644 index 0000000..36f4676 --- /dev/null +++ b/databreachdetector/README.md @@ -0,0 +1,2 @@ +# Databreach detector (WIP) +## This is a script that looks for certain types of Data in the DB and uses the deduce library to find out if any particular data gets leasked into database. 
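As a rough illustration of the per-value check the script below performs, here is a minimal sketch combining the `deduce` annotator with the Verhoeff and regex validators pinned in `requirements.txt`; the sample values are made up and the heuristics are simplified.

```python
import re
from deduce import Deduce
from stdnum import verhoeff

deduce_instance = Deduce()
EMAIL_RE = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$')

def looks_sensitive(value) -> bool:
    """Flag a value when deduce annotates it and a simple validator agrees."""
    text = str(value)
    if not deduce_instance.deidentify(text).annotations:
        return False
    return verhoeff.is_valid(text) or bool(EMAIL_RE.match(text))

# Made-up sample values for illustration.
for sample in ["123456789", "someone@example.com", "hello world"]:
    print(sample, "->", looks_sensitive(sample))
```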
diff --git a/databreachdetector/databreachdetector.py b/databreachdetector/databreachdetector.py new file mode 100644 index 0000000..4cfd453 --- /dev/null +++ b/databreachdetector/databreachdetector.py @@ -0,0 +1,238 @@ +import psycopg2 +from configparser import ConfigParser +from stdnum import verhoeff +from deduce import Deduce +from minio import Minio +from minio.error import ResponseError +import re +import os + +def is_valid_verhoeff(number): + return verhoeff.is_valid(str(number)) + +def is_valid_email(email): + email_pattern = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$') + match = email_pattern.match(str(email)) + return bool(match) + +def is_valid_mobile_number(phone_number): + pattern = re.compile(r'^[912345678]\d{9}$') + match = re.match(pattern, str(phone_number)) + return bool(match) + +def find_names(text): + # Regular expression pattern to match names + pattern = re.compile(r'\b[A-Z][a-z]*\s[A-Z][a-z]*\b') + match =re.match(pattern,str(text)) + return bool(match) + +def find_ages(text): + # Regular expression pattern to match ages + pattern = re.compile(r'\b\d{1,2}\s*(?:years?|yrs?|yo|y\.o\.|months?|mos?)\b') + match =re.match(pattern,str(text)) + return bool(match) + +def find_dates(text): + # Regular expression pattern to match dates in formats like DD/MM/YYYY or MM/DD/YYYY + pattern = r'\b(?:\d{1,2}[/\-]\d{1,2}[/\-]\d{2,4})\b' + match =re.match(pattern,str(text)) + return bool(match) + +def find_urls(text): + # Regular expression pattern to match URLs + pattern = r'\b(?:https?://|www\.)\S+\b' + match =re.match(pattern,str(text)) + return bool(match) + +def find_locations(text): + # Regular expression pattern to match common location patterns + pattern = r'\b(?:city|town|village|state|province|country|continent|island)\s(?:of\s)?(?:\b[A-Z][a-z]*\b\s?)+' + match =re.match(pattern,str(text)) + return bool(match) + +def deduce_sensitive_data(connection, database_name, schema_name, output_file, ignore_columns, ignore_tables): + deduce_instance = Deduce() + + with connection.cursor() as cursor: + cursor.execute(f"SET search_path TO {schema_name}") + cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema=%s", (schema_name,)) + tables = [table[0] for table in cursor.fetchall()] + + with open(output_file, 'a') as deduced_file: + for table_name in tables: + if ignore_tables and table_name in ignore_tables: + # print(f"Ignoring Table: {table_name} in Database: {database_name}") + continue + + print(f"Currently checking Table: {table_name} in Database: {database_name}") + deduced_file.write(f"Currently checking Table: {table_name} in Database: {database_name}\n") + + cursor.execute(f'SELECT * FROM {table_name}') + rows = cursor.fetchall() + + id_count = 0 + mail_count = 0 + mobile_count = 0 + name = 0 + age = 0 + date = 0 + url = 0 + locations = 0 + for row in rows: + for i, column_value in enumerate(row): + column_name = cursor.description[i][0] + + if ignore_columns and column_name in ignore_columns: + continue + config = ConfigParser() + deduced_result = deduce_instance.deidentify( + #getting the disabled groups from db.properties file. 
+ str(column_value), + disabled= config.get('disabled_f', 'disabled', fallback='') + ) + + if deduced_result.annotations and is_valid_verhoeff(column_value): + id_count += 1 + deduced_file.write(f"Column: {column_name}, Data: {column_value}\n") + deduced_file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + with open('mobile_numbers.txt', 'a') as file: + if deduced_result.annotations and is_valid_mobile_number(column_value): + mobile_count += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + with open('mails.txt', 'a') as file: + if deduced_result.annotations and is_valid_email(column_value): + mail_count += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + + if deduced_result.annotations and find_names(column_value): + with open('names.txt', 'a') as file: + name += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + if deduced_result.annotations and find_ages(column_value): + with open('ages.txt', 'a') as file: + age += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + if deduced_result.annotations and find_dates(column_value): + with open('dates.txt', 'a') as file: + date += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + if deduced_result.annotations and find_urls(column_value): + with open('url.txt', 'a') as file: + url += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + if deduced_result.annotations and find_locations(column_value): + with open('locations.txt', 'a') as file: + locations += 1 + file.write(f"Column: {column_name}, Data: {column_value}\n") + file.write(f"Deduced Findings: {deduced_result.annotations}\n\n") + + print(f"{mail_count} mail id's, {mobile_count} mobile numbers, {id_count} id's, {name} names, {age} ages, {date} dates, {url} urls, {locations} locations are found in {table_name} table in {database_name} database") + +def push_reports_to_s3(s3_host, s3_region, s3_user_key, s3_user_secret, s3_bucket_name): + mc = Minio(s3_host, + access_key=s3_user_key, + secret_key=s3_user_secret, + region=s3_region, + secure=False) # Set secure=True if using HTTPS + + try: + if not mc.bucket_exists(s3_bucket_name): + mc.make_bucket(s3_bucket_name, location=s3_region) + + # Ensure files exist before attempting to upload + for filename in ['id.txt', 'mails.txt', 'mobile_numbers.txt','names.txt','ages.txt','dates.txt','urls.txt','locations.txt']: + open(filename, 'a').close() + + mc.fput_object(s3_bucket_name, 'reports/id.txt', 'id.txt') + mc.fput_object(s3_bucket_name, 'reports/mails.txt', 'mails.txt') + mc.fput_object(s3_bucket_name, 'reports/mobile_numbers.txt', 'mobile_numbers.txt') + mc.fput_object(s3_bucket_name, 'reports/names.txt', 'names.txt') + mc.fput_object(s3_bucket_name, 'reports/ages.txt', 'ages.txt') + mc.fput_object(s3_bucket_name, 'reports/dates.txt', 'dates.txt') + mc.fput_object(s3_bucket_name, 'reports/url.txt', 'url.txt') + mc.fput_object(s3_bucket_name, 'reports/locations.txt', 'locations.txt') + print("\nReports pushed to MinIO") + + except ResponseError as err: + print(f"MinIO Error: {err}") + +def 
deduce_sensitive_data_in_databases(): + # Initialize config variable + config = ConfigParser() + + # If environment variables are not set, read from db.properties file + if not all([os.environ.get('db-server'), os.environ.get('db-port'), os.environ.get('db-su-user'), + os.environ.get('postgres-password'), os.environ.get('s3-host'), os.environ.get('s3-region'), + os.environ.get('s3-user-key'), os.environ.get('s3-user-secret'), os.environ.get('s3-bucket-name')]): + config.read('db.properties') + + # Read PostgreSQL and MinIO details from environment variables or db.properties + db_server = os.environ.get('db-server') or config.get('PostgreSQL Connection', 'db-server', fallback='') + db_port = os.environ.get('db-port') or config.get('PostgreSQL Connection', 'db-port', fallback='') + db_user = os.environ.get('db-su-user') or config.get('PostgreSQL Connection', 'db-su-user', fallback='') + db_password = os.environ.get('postgres-password') or config.get('PostgreSQL Connection', 'postgres-password', fallback='') + + minio_host = os.environ.get('s3-host') or config.get('MinIO Connection', 's3-host', fallback='') + minio_region = os.environ.get('s3-region') or config.get('MinIO Connection', 's3-region', fallback='') + minio_user_key = os.environ.get('s3-user-key') or config.get('MinIO Connection', 's3-user-key', fallback='') + minio_user_secret = os.environ.get('s3-user-secret') or config.get('MinIO Connection', 's3-user-secret', fallback='') + minio_bucket_name = os.environ.get('s3-bucket-name') or config.get('MinIO Connection', 's3-bucket-name', fallback='') + + # Read ignored tables and columns from db.properties + ignore_tables_str = config.get('Ignored Tables', 'ignore_tables', fallback='') + ignore_columns_str = config.get('Ignored Columns', 'ignore_columns', fallback='') + + ignore_tables = [table.strip() for table in ignore_tables_str.split(',')] if ignore_tables_str else [] + ignore_columns = [column.strip() for column in ignore_columns_str.split(',')] if ignore_columns_str else [] + + # Define the databases list + databases = [ + {"name": "mosip_resident", "schema": "resident"}, + # Add other databases as needed + ] + + connection = psycopg2.connect( + host=db_server, + port=db_port, + user=db_user, + password=db_password, + database=databases[0]['name'] + ) + + try: + output_file_path = 'id.txt' + + for db_info in databases: + print(f"\nAnalyzing data in Database: {db_info['name']}\n") + deduce_sensitive_data(connection, db_info['name'], db_info['schema'], output_file_path, ignore_columns, + ignore_tables) + + print(f"\nDeduced findings saved to respective text files") + + # Add the following lines to push reports to MinIO + s3_host = minio_host + s3_region = minio_region + s3_user_key = minio_user_key + s3_user_secret = minio_user_secret + s3_bucket_name = minio_bucket_name + + push_reports_to_s3(s3_host, s3_region, s3_user_key, s3_user_secret, s3_bucket_name) + + finally: + connection.close() + +# Call the main function +deduce_sensitive_data_in_databases() diff --git a/databreachdetector/db.properties b/databreachdetector/db.properties new file mode 100644 index 0000000..76a4248 --- /dev/null +++ b/databreachdetector/db.properties @@ -0,0 +1,22 @@ +[PostgreSQL Connection] +db-server = postgres.dev-staging.mosip.net +db-port = 5432 +db-su-user = postgres +postgres-password = ####### + +[MinIO Connection] +s3-host = http://minio.minio:9000 +s3-region = +s3-user-key = admin +s3-user-secret = +s3-bucket-name = security-rig + +[Ignored Tables] +ignore_tables = client_detail, 
reg_available_slot, batch_job_execution, batch_job_execution_context, batch_job_execution_params, batch_job_instance, batch_step_execution, batch_step_execution_context + +[Ignored Columns] +ignore_columns = + +[disabled_f] +disabled= institutions, +#add the required groups to exlude from the search names,urls,dates,locations,mail,phonenumber,id \ No newline at end of file diff --git a/databreachdetector/requirements.txt b/databreachdetector/requirements.txt new file mode 100644 index 0000000..7fffa10 --- /dev/null +++ b/databreachdetector/requirements.txt @@ -0,0 +1,4 @@ +psycopg2-binary==2.9.1 +python-stdnum==1.19 +deduce==2.4.4 +minio==6.0.2 diff --git a/deploy/auditsweeper/README.md b/deploy/auditsweeper/README.md new file mode 100644 index 0000000..8a6877c --- /dev/null +++ b/deploy/auditsweeper/README.md @@ -0,0 +1,18 @@ +# auditsweeper +Helm chart for installing auditsweeper + +## Introduction +It's a cronjob that goes through the audit table and cleans up the audit logs after a customisable no of days. + +## Install +* Review the `values.yaml` file and ensure that the database parameter values and log_age_days are set according to your environment +* RUN Install script +``` +./install.sh +``` + +# TL;DR +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/auditsweeper +``` \ No newline at end of file diff --git a/deploy/auditsweeper/copy_cm.sh b/deploy/auditsweeper/copy_cm.sh new file mode 100755 index 0000000..e033860 --- /dev/null +++ b/deploy/auditsweeper/copy_cm.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination (current) namespace + +function copying_cm() { + UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh + COPY_UTIL=./copy_cm_func.sh + + wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh + + DST_NS=auditsweeper + + $COPY_UTIL configmap global default $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function + + + diff --git a/deploy/auditsweeper/copy_secrets.sh b/deploy/auditsweeper/copy_secrets.sh new file mode 100755 index 0000000..e1a4ddc --- /dev/null +++ b/deploy/auditsweeper/copy_secrets.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copy secrets from other namespaces +# DST_NS: Destination namespace + +function copying_secrets() { + UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh + COPY_UTIL=./copy_cm_func.sh + + wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh + + DST_NS=auditsweeper + $COPY_UTIL secret postgres-postgresql postgres $DST_NS + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_secrets # calling function diff --git a/deploy/auditsweeper/delete.sh b/deploy/auditsweeper/delete.sh new file mode 100755 index 0000000..c677338 --- /dev/null +++ b/deploy/auditsweeper/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls print service +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_auditsweeper() { + NS=auditsweeper + while true; do + read -p "Are you sure you want to delete print helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete auditsweeper + break + else + break + fi + done + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_auditsweeper # calling function diff --git a/deploy/auditsweeper/install.sh b/deploy/auditsweeper/install.sh new file mode 100755 index 0000000..670f720 --- /dev/null +++ b/deploy/auditsweeper/install.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Installs sample print service +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + + +NS=auditsweeper +CHART_VERSION=0.0.1-develop + +echo Create $NS namespace +kubectl create ns $NS + +function installing_auditsweeper() { + echo Istio label + kubectl label ns $NS istio-injection=disabled --overwrite + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + ./copy_cm.sh + + echo Copy secrets + sed -i 's/\r$//' copy_secrets.sh + ./copy_secrets.sh + + echo Installing auditsweeper + helm -n $NS install auditsweeper mosip/auditsweeper -f values.yaml --wait --version $CHART_VERSION + return 0 +} + +# set commands for error handling. 
+set -e
+set -o errexit    ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset    ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace   # trace ERR through 'time command' and other functions
+set -o pipefail   # trace ERR through pipes
+installing_auditsweeper   # calling function
diff --git a/deploy/auditsweeper/values.yaml b/deploy/auditsweeper/values.yaml
new file mode 100644
index 0000000..7d00ead
--- /dev/null
+++ b/deploy/auditsweeper/values.yaml
@@ -0,0 +1,12 @@
+
+crontime: "0 3 * * *"  ## run cronjob every day at 3 AM (time hr: 0-23)
+
+auditsweeper:
+  configmaps:
+    db:
+      db-port: '5432'
+      db-su-user: 'postgres'
+      db-host: 'postgres.sandbox.mosip.net'
+    auditsweeper:
+      log-age-days: '90'
+
diff --git a/deploy/databreachdetector/copy_cm.sh b/deploy/databreachdetector/copy_cm.sh
new file mode 100755
index 0000000..e907011
--- /dev/null
+++ b/deploy/databreachdetector/copy_cm.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copy configmaps from other namespaces
+# DST_NS: Destination (current) namespace
+
+function copying_cm() {
+
+  UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+  COPY_UTIL=./copy_cm_func.sh
+
+  wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+  DST_NS=databreachdetector
+
+  $COPY_UTIL configmap global default $DST_NS
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit    ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset    ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace   # trace ERR through 'time command' and other functions
+set -o pipefail   # trace ERR through pipes
+copying_cm   # calling function
+
+
+
diff --git a/deploy/databreachdetector/copy_secrets.sh b/deploy/databreachdetector/copy_secrets.sh
new file mode 100755
index 0000000..df8f75b
--- /dev/null
+++ b/deploy/databreachdetector/copy_secrets.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copy secrets from other namespaces
+# DST_NS: Destination namespace
+
+function copying_secrets() {
+  UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+  COPY_UTIL=./copy_cm_func.sh
+
+  wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+  DST_NS=databreachdetector
+
+  $COPY_UTIL secret s3 s3 $DST_NS
+  $COPY_UTIL secret postgres-postgresql postgres $DST_NS
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit    ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset    ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace   # trace ERR through 'time command' and other functions
+set -o pipefail   # trace ERR through pipes
+copying_secrets   # calling function
diff --git a/deploy/databreachdetector/delete.sh b/deploy/databreachdetector/delete.sh
new file mode 100755
index 0000000..b769725
--- /dev/null
+++ b/deploy/databreachdetector/delete.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Uninstalls the databreachdetector helm chart
+## Usage: ./delete.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+  export KUBECONFIG=$1
+fi
+
+function deleting_databreachdetector() {
+  NS=databreachdetector
+  while true; do
+      read -p "Are you sure you want to delete the databreachdetector helm chart?(Y/n) " yn
+      if [ $yn = "Y" ]
+        then
+          helm -n $NS delete databreachdetector
+          break
+        else
+          break
+      fi
+  done
+  return 0
+}
+
+# set commands for error handling.
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_databreachdetector # calling function diff --git a/deploy/databreachdetector/install.sh b/deploy/databreachdetector/install.sh new file mode 100755 index 0000000..8438bfa --- /dev/null +++ b/deploy/databreachdetector/install.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Installs sample print service +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + + +NS=databreachdetector +CHART_VERSION=0.0.1-develop + +echo Create $NS namespace +kubectl create ns $NS + +function installing_databreachdetector() { + echo Istio label + kubectl label ns $NS istio-injection=disabled --overwrite + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + kubectl -n $NS delete --ignore-not-found=true cm s3 + ./copy_cm.sh + + echo Copy secrets + sed -i 's/\r$//' copy_secrets.sh + ./copy_secrets.sh + + DB_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-postgres-host"' ) + S3_USER_KEY=$( kubectl -n s3 get cm s3 -o json |jq -r '.data."s3-user-key"' ) + S3_REGION=$( kubectl -n s3 get cm s3 -o json |jq -r '.data."s3-region"' ) + + echo Installing databreachdetector + helm -n $NS install databreachdetector mosip/databreachdetector --wait --version $CHART_VERSION \ + --set databreachdetector.configmaps.db.db-server="$DB_HOST" \ + --set databreachdetector.configmaps.s3.s3-bucket-name='secure-datarig' \ + --set databreachdetector.configmaps.s3.s3-region="$S3_REGION" \ + --set databreachdetector.configmaps.s3.s3-host='minio.minio:9000' \ + --set databreachdetector.configmaps.s3.s3-user-key="$S3_USER_KEY" + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_databreachdetector # calling function + diff --git a/deploy/mosipcertmanager/README.md b/deploy/mosipcertmanager/README.md new file mode 100644 index 0000000..f8c6aec --- /dev/null +++ b/deploy/mosipcertmanager/README.md @@ -0,0 +1,18 @@ +# mosipcertmanager +Helm chart for installing mosipcertmanager + +## Introduction +It's a cronjob that checks partner certificate expiry dates and renews the certificates if expired. 
+ +## Install +* Review the `values.yaml` file and ensure that the database parameter values and partner IDs are set according to your environment +* RUN Install script +``` +./install.sh +``` + +# TL;DR +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/mosipcertmanager +``` \ No newline at end of file diff --git a/deploy/mosipcertmanager/copy_cm.sh b/deploy/mosipcertmanager/copy_cm.sh new file mode 100755 index 0000000..7cdda1f --- /dev/null +++ b/deploy/mosipcertmanager/copy_cm.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination (current) namespace + +function copying_cm() { + UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh + COPY_UTIL=./copy_cm_func.sh + + wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh + + DST_NS=mosipcertmanager + + $COPY_UTIL configmap global default $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function + + + diff --git a/deploy/mosipcertmanager/copy_secrets.sh b/deploy/mosipcertmanager/copy_secrets.sh new file mode 100755 index 0000000..4c1fbfe --- /dev/null +++ b/deploy/mosipcertmanager/copy_secrets.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copy secrets from other namespaces +# DST_NS: Destination namespace + +function copying_secrets() { + UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh + COPY_UTIL=./copy_cm_func.sh + + wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh + + DST_NS=mosipcertmanager + $COPY_UTIL secret postgres-postgresql postgres $DST_NS + $COPY_UTIL secret keycloak-client-secrets keycloak $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_secrets # calling function diff --git a/deploy/mosipcertmanager/delete.sh b/deploy/mosipcertmanager/delete.sh new file mode 100755 index 0000000..b089ab2 --- /dev/null +++ b/deploy/mosipcertmanager/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls print service +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_mosipcertmanager() { + NS=mosipcertmanager + while true; do + read -p "Are you sure you want to delete print helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete mosipcertmanager + break + else + break + fi + done + return 0 +} + +# set commands for error handling. 
+set -e
+set -o errexit    ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset    ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace   # trace ERR through 'time command' and other functions
+set -o pipefail   # trace ERR through pipes
+deleting_mosipcertmanager   # calling function
diff --git a/deploy/mosipcertmanager/install.sh b/deploy/mosipcertmanager/install.sh
new file mode 100755
index 0000000..af72088
--- /dev/null
+++ b/deploy/mosipcertmanager/install.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Installs mosipcertmanager
+## Usage: ./install.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+  export KUBECONFIG=$1
+fi
+
+
+NS=mosipcertmanager
+CHART_VERSION=0.0.1-develop
+
+echo Create $NS namespace
+kubectl create ns $NS
+
+function installing_mosipcertmanager() {
+  echo Istio label
+  kubectl label ns $NS istio-injection=disabled --overwrite
+  helm repo update
+
+  echo Copy configmaps
+  sed -i 's/\r$//' copy_cm.sh
+  ./copy_cm.sh
+
+  echo Copy secrets
+  sed -i 's/\r$//' copy_secrets.sh
+  ./copy_secrets.sh
+
+  echo Installing mosipcertmanager
+  helm -n $NS install mosipcertmanager mosip/mosipcertmanager -f values.yaml --wait --version $CHART_VERSION
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit    ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset    ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace   # trace ERR through 'time command' and other functions
+set -o pipefail   # trace ERR through pipes
+installing_mosipcertmanager   # calling function
diff --git a/deploy/mosipcertmanager/values.yaml b/deploy/mosipcertmanager/values.yaml
new file mode 100644
index 0000000..2c0b6f2
--- /dev/null
+++ b/deploy/mosipcertmanager/values.yaml
@@ -0,0 +1,14 @@
+
+crontime: "0 3 * * *"  ## run cronjob every day at 3 AM (time hr: 0-23)
+
+mosipcertmanager:
+  configmaps:
+    db:
+      db-port: '5432'
+      db-su-user: 'postgres'
+      db-host: 'postgres.sandbox.mosip.net'
+    partner-ids-env:
+      PARTNER_IDS_ENV: mpartner-default-print,mpartner-default-abis,mpartner-default-mobile,mpartner-default-digitalcard,mpartner-default-auth,mpartner-default-resident,mpartner-default-demo-oidc,mpartner-default-resident-oidc,mpartner-default-mimotooidc,mpartner-default-esignet,mpartner-default-mimotokeybinding
+    mosipcertmanager:
+      pre-expiry-days: '40'
+
diff --git a/helm/auditsweeper/Chart.yaml b/helm/auditsweeper/Chart.yaml
new file mode 100644
index 0000000..853618c
--- /dev/null
+++ b/helm/auditsweeper/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: auditsweeper
+description: A Helm chart to deploy auditsweeper
+type: application
+version: 0.0.1-develop
+appVersion: ""
+dependencies:
+  - name: common
+    repository: https://charts.bitnami.com/bitnami
+    tags:
+      - bitnami-common
+    version: 1.x.x
+home: https://mosip.io
+keywords:
+  - mosip
+  - auditsweeper
+maintainers:
+  - email: info@mosip.io
+    name: MOSIP
diff --git a/helm/auditsweeper/README.md b/helm/auditsweeper/README.md
new file mode 100644
index 0000000..4fd211b
--- /dev/null
+++ b/helm/auditsweeper/README.md
@@ -0,0 +1,12 @@
+# auditsweeper
+Helm chart for installing auditsweeper
+
+## Introduction
+It's a cronjob that goes through the audit table and cleans up audit logs older than a configurable number of days.
+ +# TL;DR +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/auditsweeper +``` + diff --git a/helm/auditsweeper/templates/NOTES.txt b/helm/auditsweeper/templates/NOTES.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/helm/auditsweeper/templates/NOTES.txt @@ -0,0 +1 @@ + diff --git a/helm/auditsweeper/templates/_helpers.tpl b/helm/auditsweeper/templates/_helpers.tpl new file mode 100644 index 0000000..c7fab14 --- /dev/null +++ b/helm/auditsweeper/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Return the proper image name +*/}} +{{- define "auditsweeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "auditsweeper.volumePermissions.image" -}} +{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "auditsweeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "auditsweeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (printf "%s-foo" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "auditsweeper.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "auditsweeper.validateValues.foo" .) -}} +{{- $messages := append $messages (include "auditsweeper.validateValues.bar" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "auditsweeper.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* Create the name for restart cronjob */}} +{{- define "auditsweeper.cronjob" -}} +{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- end -}} diff --git a/helm/auditsweeper/templates/clusterrole.yaml b/helm/auditsweeper/templates/clusterrole.yaml new file mode 100644 index 0000000..9a555c7 --- /dev/null +++ b/helm/auditsweeper/templates/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-deployment-clusterrole +rules: + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "patch", "list", "watch"] \ No newline at end of file diff --git a/helm/auditsweeper/templates/clusterrolebinding.yaml b/helm/auditsweeper/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..742d997 --- /dev/null +++ b/helm/auditsweeper/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-deployment-clusterrolebinding +subjects: + - kind: ServiceAccount + name: {{ template "auditsweeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-deployment-clusterrole + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/helm/auditsweeper/templates/configmaps.yaml b/helm/auditsweeper/templates/configmaps.yaml new file mode 100644 index 0000000..f7e85f8 --- /dev/null +++ b/helm/auditsweeper/templates/configmaps.yaml @@ -0,0 +1,21 @@ +{{- if .Values.auditsweeper.configmaps }} + {{- range $cm_name, $cm_value := .Values.auditsweeper.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $cm_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +data: + {{- range $key, $value := $cm_value }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/helm/auditsweeper/templates/cronjob.yaml b/helm/auditsweeper/templates/cronjob.yaml new file mode 100644 index 0000000..6d394c2 --- /dev/null +++ b/helm/auditsweeper/templates/cronjob.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ template "auditsweeper.cronjob" $ }} + namespace: {{ .Release.Namespace }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- 
include "common.labels.standard" $ | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + +spec: + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed + failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed + #schedule: '*/3 * * * *' # cron spec of time + schedule: {{ .Values.crontime }} + jobTemplate: + spec: + backoffLimit: 0 # this has very low chance of failing, as all this does + # is prompt kubernetes to schedule new replica set for + # the deployment + # activeDeadlineSeconds: 600 # timeout, makes most sense with + # "waiting for rollout" variant specified below + template: + spec: + # account configured above + restartPolicy: Never + serviceAccountName: {{ template "auditsweeper.serviceAccountName" $ }} + containers: + - name: {{ template "auditsweeper.serviceAccountName" $ }} + image: {{ template "auditsweeper.image" $ }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tpvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tpvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tpvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: container_user + value: {{ .Values.containerSecurityContext.runAsUser }} + {{- if .Values.extraEnvVars }} + {{- include "common.tpvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + {{- range .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + {{- range .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} diff --git a/helm/auditsweeper/templates/extra-list.yaml b/helm/auditsweeper/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/helm/auditsweeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/helm/auditsweeper/templates/secrets.yaml b/helm/auditsweeper/templates/secrets.yaml new file mode 100644 index 0000000..83a2042 --- /dev/null +++ b/helm/auditsweeper/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if .Values.auditsweeper.secrets }} +{{- range $secret_name, $secret_value := .Values.auditsweeper.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secret_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +type: Opaque +data: + {{- range $key, $value := $secret_value }} + {{ $key }}: {{ $value | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/auditsweeper/templates/service-account.yaml b/helm/auditsweeper/templates/service-account.yaml new file mode 100644 index 0000000..f3aa762 --- /dev/null +++ b/helm/auditsweeper/templates/service-account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + name: {{ template "auditsweeper.serviceAccountName" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} diff --git a/helm/auditsweeper/values.yaml b/helm/auditsweeper/values.yaml new file mode 100644 index 0000000..6157477 --- /dev/null +++ b/helm/auditsweeper/values.yaml @@ -0,0 +1,374 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Add labels to all the deployed resources +## +commonLabels: + app.kubernetes.io/component: mosip + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Number of nodes +## +replicaCount: 1 + +service: + type: ClusterIP + port: 80 + ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP: + ## + ## nodePorts: + ## http: + ## https: + ## + nodePorts: + http: "" + https: "" + ## Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + +image: + registry: docker.io + repository: mosipdev/auditsweeper + tag: develop + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an 
array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Port on which this particular spring service module is running. +# springServicePort: 8083 + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## + +## +# existingConfigmap: + +## Command and args for running the container (set to default if not set). Use array form +## +command: [] +args: [] + +## Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] + +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +# resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + ## cpu: 1000m + ## memory: 3500Mi + # requests: + ## cpu: 1000m + # memory: 3500Mi + +additionalResources: + ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources + ## Example: java_opts: "-Xms500M -Xmx500M" + javaOpts: "-Xms2600M -Xmx2600M" + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Clamav container already runs as 'mosip' user, so we may not need to enable this +containerSecurityContext: + enabled: false + runAsUser: mosip + runAsNonRoot: true + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: false + fsGroup: 1001 + +## Pod affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAffinityPreset: "" + +## Pod anti-affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## Allowed values: soft, hard +## +nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + ## + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. 
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Pod extra labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for server pods. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## lifecycleHooks for the container to automate configuration before or after startup. +## +lifecycleHooks: {} + +## Custom Liveness probes for +## +customLivenessProbe: {} + +## Custom Rediness probes +## +customReadinessProbe: {} + +## Update strategy - only really applicable for deployments with RWO PVs attached +## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the +## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will +## terminate the single previous pod, so that the new, incoming pod can attach to the PV +## +updateStrategy: + type: RollingUpdate + +## Additional environment variables to set +## Example: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] + +## ConfigMap with extra environment variables +## +extraEnvVarsCM: + - global + - db + - auditsweeper +## Secret with extra environment variables +## +extraEnvVarsSecret: + - postgres-postgresql + +## Extra volumes to add to the deployment +## +extraVolumes: [] + +## Extra volume mounts to add to the container +## +extraVolumeMounts: [] + +## Add init containers to the pods. +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## + +## Add sidecars to the pods. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + ## - myRegistryKeySecretName + ## Init containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## + limits: {} + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## cpu: 100m + ## memory: 128Mi + ## + +## Specifies whether RBAC resources should be created +## +rbac: + create: true + +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + ## + name: + +## Prometheus Metrics +## +metrics: + enabled: false + ## Prometheus pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + + endpointPath: + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: true + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 10s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + # rules: + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + rules: [] + +## Admin swagger should have only internal access. 
Hence linked to internal gateway + +crontime: "0 3 * * *" ## run cronjob every day at 3 AM (time hr: 0-23 ) + +auditsweeper: + configmaps: + db: + db-port: '5432' + db-su-user: 'postgres' + db-host: 'postgres.sandbox.mosip.net' + auditsweeper: + log-age-days: '90' + +enable_insecure: false diff --git a/helm/databreachdetector/Chart.yaml b/helm/databreachdetector/Chart.yaml new file mode 100644 index 0000000..090253c --- /dev/null +++ b/helm/databreachdetector/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: databreachdetector +description: A Helm chart to deploy databreachdetector to test working of MOSIP modules +type: application +version: 0.0.1-develop +appVersion: "" +dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +home: https://mosip.io +keywords: + - mosip + - databreachdetector +maintainers: + - email: info@mosip.io + name: MOSIP diff --git a/helm/databreachdetector/README.md b/helm/databreachdetector/README.md new file mode 100644 index 0000000..2de1511 --- /dev/null +++ b/helm/databreachdetector/README.md @@ -0,0 +1,10 @@ +# databreachdetector + +Helm chart to deploy databreachdetector for `MOSIP` modules + +## TL;DR + +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/databreachdetector +``` diff --git a/helm/databreachdetector/templates/NOTES.txt b/helm/databreachdetector/templates/NOTES.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/helm/databreachdetector/templates/NOTES.txt @@ -0,0 +1 @@ + diff --git a/helm/databreachdetector/templates/_helpers.tpl b/helm/databreachdetector/templates/_helpers.tpl new file mode 100644 index 0000000..1dfe9b0 --- /dev/null +++ b/helm/databreachdetector/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Return the proper image name +*/}} +{{- define "databreachdetector.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "databreachdetector.volumePermissions.image" -}} +{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "databreachdetector.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "databreachdetector.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (printf "%s-foo" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "databreachdetector.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "databreachdetector.validateValues.foo" .) -}} +{{- $messages := append $messages (include "databreachdetector.validateValues.bar" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "databreachdetector.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* Create the name for restart cronjob */}} +{{- define "databreachdetector.cronjob" -}} +{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- end -}} diff --git a/helm/databreachdetector/templates/configmaps.yaml b/helm/databreachdetector/templates/configmaps.yaml new file mode 100644 index 0000000..2d76a76 --- /dev/null +++ b/helm/databreachdetector/templates/configmaps.yaml @@ -0,0 +1,21 @@ +{{- if .Values.databreachdetector.configmaps }} +{{- range $cm_name, $cm_value := .Values.databreachdetector.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $cm_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +data: + {{- range $key, $value := $cm_value }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/databreachdetector/templates/cronjob.yaml b/helm/databreachdetector/templates/cronjob.yaml new file mode 100644 index 0000000..dc134b6 --- /dev/null +++ b/helm/databreachdetector/templates/cronjob.yaml @@ -0,0 +1,80 @@ +{{- range $type := $.Values.types }} +{{- if $type.enabled }} +--- +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ template "databreachdetector.cronjob" $ }}-{{ $type.name }} + namespace: {{ $.Release.Namespace }} + annotations: + {{- if $.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + +spec: + {{- if eq $type.name "full" }} + suspend: false + {{- end }} + {{- if eq $type.name "sanity" }} + suspend: true + {{- end }} + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed + failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed + #schedule: '*/3 * * * *' # cron spec of time + schedule: {{ $.Values.crontime }} + jobTemplate: + spec: + backoffLimit: 0 # this has very low chance of failing, as all this does + # is prompt kubernetes to schedule new replica set for + # the deployment + # activeDeadlineSeconds: 600 # timeout, makes most sense with + # "waiting for rollout" variant specified below + template: + spec: + # account configured above + restartPolicy: Never + containers: + - 
name: {{ template "databreachdetector.serviceAccountName" $ }}-{{ $type.name }} + image: {{ template "databreachdetector.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.lifecycleHooks }} + lifecycle: {{- include "common.tpvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if $.Values.containerSecurityContext.enabled }} + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if $.Values.command }} + command: {{- include "common.tpvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if $.Values.args }} + args: {{- include "common.tpvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: container_user + value: {{ $.Values.containerSecurityContext.runAsUser }} + {{- if $.Values.extraEnvVars }} + {{- include "common.tpvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if $.Values.extraEnvVarsCM }} + {{- range $.Values.extraEnvVarsCM }} + - configMapRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if $.Values.extraEnvVarsSecret }} + {{- range $.Values.extraEnvVarsSecret }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + ports: + - name: spring-service + containerPort: {{ $.Values.springServicePort }} +{{- end }} +{{- end }} diff --git a/helm/databreachdetector/templates/extra-list.yaml b/helm/databreachdetector/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/helm/databreachdetector/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/helm/databreachdetector/templates/secrets.yaml b/helm/databreachdetector/templates/secrets.yaml new file mode 100644 index 0000000..1cbf73b --- /dev/null +++ b/helm/databreachdetector/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if .Values.databreachdetector.secrets }} +{{- range $secret_name, $secret_value := .Values.databreachdetector.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secret_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +type: Opaque +data: + {{- range $key, $value := $secret_value }} + {{ $key }}: {{ $value | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/databreachdetector/templates/service-account.yaml b/helm/databreachdetector/templates/service-account.yaml new file mode 100644 index 0000000..650cbb7 --- /dev/null +++ b/helm/databreachdetector/templates/service-account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + name: {{ template "databreachdetector.serviceAccountName" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} diff --git a/helm/databreachdetector/values.yaml b/helm/databreachdetector/values.yaml new file mode 100644 index 0000000..da62c5c --- /dev/null +++ b/helm/databreachdetector/values.yaml @@ -0,0 +1,438 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Add labels to all the deployed resources +## +commonLabels: + app.kubernetes.io/component: mosip + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Number of nodes +## +replicaCount: 1 + +service: + type: ClusterIP + port: 80 + ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP: + ## + ## nodePorts: + ## http: + ## https: + ## + nodePorts: + http: "" + https: "" + ## Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + +image: + registry: docker.io + repository: mosipqa/databreachdetector + tag: develop + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Port on which this particular spring service module is running. +springServicePort: 8083 + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## + +## +# existingConfigmap: + +## Command and args for running the container (set to default if not set). Use array form +## +command: [] +args: [] + +## Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] + +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1000m + memory: 3500Mi + requests: + cpu: 1000m + memory: 3500Mi + +additionalResources: + ## Specify any JAVA_OPTS string here. 
These typically will be specified in conjunction with above resources + ## Example: java_opts: "-Xms500M -Xmx500M" + javaOpts: "-Xms2600M -Xmx2600M" + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Clamav container already runs as 'mosip' user, so we may not need to enable this +containerSecurityContext: + enabled: false + runAsUser: mosip + runAsNonRoot: true + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: false + fsGroup: 1001 + +## Pod affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAffinityPreset: "" + +## Pod anti-affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## Allowed values: soft, hard +## +nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + ## + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Pod extra labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for server pods. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## lifecycleHooks for the container to automate configuration before or after startup. +## +lifecycleHooks: {} + +## Custom Liveness probes for +## +customLivenessProbe: {} + +## Custom Rediness probes +## +customReadinessProbe: {} + +## Update strategy - only really applicable for deployments with RWO PVs attached +## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the +## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will +## terminate the single previous pod, so that the new, incoming pod can attach to the PV +## +updateStrategy: + type: RollingUpdate + +## Additional environment variables to set +## Example: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] + +## ConfigMap with extra environment variables +## +extraEnvVarsCM: + - global + - s3 + - db +## Secret with extra environment variables +## +extraEnvVarsSecret: + - s3 + - postgres-postgresql + +## Extra volumes to add to the deployment +## +extraVolumes: [] + +## Extra volume mounts to add to the container +## +extraVolumeMounts: [] + +## Add init containers to the pods. 
+## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: + - command: + - /bin/bash + - -c + - if [ "$ENABLE_INSECURE" = "true" ]; then HOST=$( env | grep "mosip-api-internal-host" + |sed "s/mosip-api-internal-host=//g"); if [ -z "$HOST" ]; then echo "HOST + $HOST is empty; EXITING"; exit 1; fi; openssl s_client -servername "$HOST" + -connect "$HOST":443 > "$HOST.cer" 2>/dev/null & sleep 2 ; sed -i -ne '/-BEGIN + CERTIFICATE-/,/-END CERTIFICATE-/p' "$HOST.cer"; cat "$HOST.cer"; /usr/local/openjdk-11/bin/keytool + -delete -alias "$HOST" -keystore $JAVA_HOME/lib/security/cacerts -storepass + changeit; /usr/local/openjdk-11/bin/keytool -trustcacerts -keystore "$JAVA_HOME/lib/security/cacerts" + -storepass changeit -noprompt -importcert -alias "$HOST" -file "$HOST.cer" + ; if [ $? -gt 0 ]; then echo "Failed to add SSL certificate for host $host; + EXITING"; exit 1; fi; cp /usr/local/openjdk-11/lib/security/cacerts /cacerts; + fi + env: + - name: ENABLE_INSECURE + value: "true" + envFrom: + - configMapRef: + name: global + image: docker.io/openjdk:11-jre + imagePullPolicy: Always + name: cacerts + resources: {} + securityContext: + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /cacerts + name: cacerts + +## Add sidecars to the pods. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +persistence: + enabled: true + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack). + ## + # storageClass: "-" + ## + ## If you want to reuse an existing claim, you can pass the name of the PVC using + ## the existingClaim variable + # existingClaim: your-claim + ## ReadWriteMany not supported by AWS gp2 + storageClass: + accessModes: + - ReadWriteOnce + size: 100m + existingClaim: + nfs: + path: '/srv/nfs/mosip/dsl-scenarios/' + server: '' + # Dir where config and keys are written inside container + mountDir: '/home/mosip/mountvolume/scenarios' + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + ## - myRegistryKeySecretName + ## Init containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## + limits: {} + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## cpu: 100m + ## memory: 128Mi + ## + +## Specifies whether RBAC resources should be created +## +rbac: + create: true + +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + ## + name: + +## Prometheus Metrics +## +metrics: + enabled: false + ## Prometheus pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + + endpointPath: + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: true + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 10s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + # rules: + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + rules: [] + +## Admin swagger should have only internal access. 
Hence linked to internal gateway + +types: + - name: full + enabled: true + +crontime: "0 3 * * *" ## run cronjob every day at 3 AM (time hr: 0-23 ) + +databreachdetector: + configmaps: + s3: + s3-host: 'http://minio.minio:9000' + s3-user-key: 'admin' + s3-region: '' + db: + db-port: '5432' + db-su-user: 'postgres' + db-server: 'api-internal.sandbox.xyz.net' + secrets: + +enable_insecure: false diff --git a/helm/mosipcertmanager/Chart.yaml b/helm/mosipcertmanager/Chart.yaml new file mode 100644 index 0000000..a2dab74 --- /dev/null +++ b/helm/mosipcertmanager/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: mosipcertmanager +description: A Helm chart to deploy mosipcertmanager to test working of MOSIP modules +type: application +version: 0.0.1-develop +appVersion: "" +dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +home: https://mosip.io +keywords: + - mosip + - mosipcertmanager +maintainers: + - email: info@mosip.io + name: MOSIP diff --git a/helm/mosipcertmanager/README.md b/helm/mosipcertmanager/README.md new file mode 100644 index 0000000..9ac9c56 --- /dev/null +++ b/helm/mosipcertmanager/README.md @@ -0,0 +1,13 @@ +# mosipcertmanager +Helm chart for installing mosipcertmanager + +## Introduction +It's a cronjob that checks DBs for partner certificate expiry dates and renews the certificates if expired. + + +# TL;DR +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/mosipcertmanager +``` + diff --git a/helm/mosipcertmanager/templates/NOTES.txt b/helm/mosipcertmanager/templates/NOTES.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/helm/mosipcertmanager/templates/NOTES.txt @@ -0,0 +1 @@ + diff --git a/helm/mosipcertmanager/templates/_helpers.tpl b/helm/mosipcertmanager/templates/_helpers.tpl new file mode 100644 index 0000000..896e7c6 --- /dev/null +++ b/helm/mosipcertmanager/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Return the proper image name +*/}} +{{- define "mosipcertmanager.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mosipcertmanager.volumePermissions.image" -}} +{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mosipcertmanager.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "mosipcertmanager.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (printf "%s-foo" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "mosipcertmanager.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "mosipcertmanager.validateValues.foo" .) -}} +{{- $messages := append $messages (include "mosipcertmanager.validateValues.bar" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "mosipcertmanager.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* Create the name for restart cronjob */}} +{{- define "mosipcertmanager.cronjob" -}} +{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- end -}} diff --git a/helm/mosipcertmanager/templates/clusterrole.yaml b/helm/mosipcertmanager/templates/clusterrole.yaml new file mode 100644 index 0000000..9a555c7 --- /dev/null +++ b/helm/mosipcertmanager/templates/clusterrole.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-deployment-clusterrole +rules: + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "patch", "list", "watch"] \ No newline at end of file diff --git a/helm/mosipcertmanager/templates/clusterrolebinding.yaml b/helm/mosipcertmanager/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..9cc5faa --- /dev/null +++ b/helm/mosipcertmanager/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-deployment-clusterrolebinding +subjects: + - kind: ServiceAccount + name: {{ template "mosipcertmanager.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-deployment-clusterrole + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/helm/mosipcertmanager/templates/configmaps.yaml b/helm/mosipcertmanager/templates/configmaps.yaml new file mode 100644 index 0000000..bf60bd5 --- /dev/null +++ b/helm/mosipcertmanager/templates/configmaps.yaml @@ -0,0 +1,21 @@ +{{- if .Values.mosipcertmanager.configmaps }} + {{- range $cm_name, $cm_value := .Values.mosipcertmanager.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $cm_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +data: + {{- range $key, $value := $cm_value }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/helm/mosipcertmanager/templates/cronjob.yaml b/helm/mosipcertmanager/templates/cronjob.yaml new file mode 100644 index 0000000..4603ae6 --- /dev/null +++ b/helm/mosipcertmanager/templates/cronjob.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ template "mosipcertmanager.cronjob" $ }} + namespace: {{ .Release.Namespace }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + +spec: + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed + failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed + #schedule: '*/3 * * * *' # cron spec of time + schedule: {{ .Values.crontime }} + jobTemplate: + spec: + backoffLimit: 0 # this has very low chance of failing, as all this does + # is prompt kubernetes to schedule new replica set for + # the deployment + # activeDeadlineSeconds: 600 # timeout, makes most sense with + # "waiting for rollout" variant specified below + template: + spec: + # account configured above + restartPolicy: Never + serviceAccountName: {{ template "mosipcertmanager.serviceAccountName" $ }} + containers: + - name: {{ template "mosipcertmanager.serviceAccountName" $ }} + image: {{ template "mosipcertmanager.image" $ }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tpvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tpvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tpvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: 
+ - name: container_user + value: {{ .Values.containerSecurityContext.runAsUser }} + {{- if .Values.extraEnvVars }} + {{- include "common.tpvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + {{- range .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + {{- range .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} diff --git a/helm/mosipcertmanager/templates/extra-list.yaml b/helm/mosipcertmanager/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/helm/mosipcertmanager/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/helm/mosipcertmanager/templates/secrets.yaml b/helm/mosipcertmanager/templates/secrets.yaml new file mode 100644 index 0000000..15f9aef --- /dev/null +++ b/helm/mosipcertmanager/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if .Values.mosipcertmanager.secrets }} +{{- range $secret_name, $secret_value := .Values.mosipcertmanager.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secret_name }} + namespace: {{ $.Release.Namespace }} + labels: {{- include "common.labels.standard" $ | nindent 8 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} +type: Opaque +data: + {{- range $key, $value := $secret_value }} + {{ $key }}: {{ $value | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/mosipcertmanager/templates/service-account.yaml b/helm/mosipcertmanager/templates/service-account.yaml new file mode 100644 index 0000000..eeff0bb --- /dev/null +++ b/helm/mosipcertmanager/templates/service-account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + name: {{ template "mosipcertmanager.serviceAccountName" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} diff --git a/helm/mosipcertmanager/values.yaml b/helm/mosipcertmanager/values.yaml new file mode 100644 index 0000000..3dec939 --- /dev/null +++ b/helm/mosipcertmanager/values.yaml @@ -0,0 +1,381 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Add labels to all the deployed resources +## +commonLabels: + app.kubernetes.io/component: mosip + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Number of nodes +## +replicaCount: 1 + +service: + type: ClusterIP + port: 80 + ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP: + ## + ## nodePorts: + ## http: + ## https: + ## + nodePorts: + http: "" + https: "" + ## Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + +image: + registry: docker.io + repository: mosipqa/certmanager + tag: develop + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Port on which this particular spring service module is running. +# springServicePort: 8083 + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## + +## +# existingConfigmap: + +## Command and args for running the container (set to default if not set). Use array form +## +command: [] +args: [] + +## Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] + +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +# resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + ## cpu: 1000m + ## memory: 3500Mi + # requests: + ## cpu: 1000m + # memory: 3500Mi + +additionalResources: + ## Specify any JAVA_OPTS string here. 
These typically will be specified in conjunction with above resources + ## Example: java_opts: "-Xms500M -Xmx500M" + javaOpts: "-Xms2600M -Xmx2600M" + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Clamav container already runs as 'mosip' user, so we may not need to enable this +containerSecurityContext: + enabled: false + runAsUser: mosip + runAsNonRoot: true + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: false + fsGroup: 1001 + +## Pod affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAffinityPreset: "" + +## Pod anti-affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## Allowed values: soft, hard +## +nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + ## + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Pod extra labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for server pods. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## lifecycleHooks for the container to automate configuration before or after startup. +## +lifecycleHooks: {} + +## Custom Liveness probes for +## +customLivenessProbe: {} + +## Custom Rediness probes +## +customReadinessProbe: {} + +## Update strategy - only really applicable for deployments with RWO PVs attached +## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the +## PV, and the "incoming" pod can never start. 
+## terminate the single previous pod, so that the new, incoming pod can attach to the PV
+##
+updateStrategy:
+  type: RollingUpdate
+
+## Additional environment variables to set
+## Example:
+## extraEnvVars:
+##   - name: FOO
+##     value: "bar"
+##
+extraEnvVars: []
+
+## ConfigMaps with extra environment variables
+##
+extraEnvVarsCM:
+  - global
+  - db
+  - mosipcertmanager
+  - partner-ids-env
+  - esignet-namespace
+
+## Secrets with extra environment variables
+##
+extraEnvVarsSecret:
+  - postgres-postgresql
+  - keycloak-client-secrets
+
+## Extra volumes to add to the deployment
+##
+extraVolumes: []
+
+## Extra volume mounts to add to the container
+##
+extraVolumeMounts: []
+
+## Add init containers to the pods.
+## Example:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+
+## Add sidecars to the pods.
+## Example:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: {}
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: "10"
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    pullSecrets: []
+    ##   - myRegistryKeySecretName
+  ## Init containers' resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+    ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    ##
+    limits: {}
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    ##
+    requests: {}
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    ##
+
+## Specifies whether RBAC resources should be created
+##
+rbac:
+  create: true
+
+## Specifies whether a ServiceAccount should be created
+##
+serviceAccount:
+  create: true
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the fullname template
+  ##
+  name:
+
+## Prometheus Metrics
+##
+metrics:
+  enabled: false
+  ## Prometheus pod annotations
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+
+  endpointPath:
+
+  ## Prometheus Service Monitor
+  ## ref: https://github.com/coreos/prometheus-operator
+  ##
+  serviceMonitor:
+    ## If the operator is installed in your cluster, set to true to create a ServiceMonitor entry
+    ##
+    enabled: true
+    ## Specify the namespace in which the serviceMonitor resource will be created
+    ##
+    # namespace: ""
+    ## Specify the interval at which metrics should be scraped
+    ##
+    interval: 10s
+    ## Specify the timeout after which the scrape is ended
+    ##
+    # scrapeTimeout: 30s
+    ## Specify metric relabellings to add to the scrape endpoint
+    ##
+    # relabellings:
+    ## Specify the honorLabels parameter to add to the scrape endpoint
+    ##
+    honorLabels: false
+    ## Used to pass labels that are used by the Prometheus installed in your cluster to select ServiceMonitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    ##
+    additionalLabels: {}
+
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ''
+    ## List of rules, used as template by Helm.
+    ## These are just example rules inspired by https://awesome-prometheus-alerts.grep.to/rules.html
+    # rules:
+    #   - alert: RabbitmqDown
+    #     expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
+    #     for: 5m
+    #     labels:
+    #       severity: error
+    rules: []
+
+## Schedule for the certificate expiry-check cronjob (standard cron format)
+crontime: "0 3 * * *"  ## run the cronjob every day at 3 AM (hour field: 0-23)
+
+mosipcertmanager:
+  configmaps:
+    db:
+      db-port: '5432'
+      db-su-user: 'postgres'
+      db-host: 'postgres.sandbox.mosip.net'
+    mosipcertmanager:
+      pre-expiry-days: '40'
+    partner-ids-env:
+      PARTNER_IDS_ENV: mpartner-default-print,mpartner-default-abis,mpartner-default-mobile,mpartner-default-digitalcard,mpartner-default-auth,mpartner-default-resident,mpartner-default-demo-oidc,mpartner-default-resident-oidc,mpartner-default-mimotooidc,mpartner-default-esignet,mpartner-default-mimotokeybinding
+    esignet-namespace:
+      ns_esignet: esignet
+
+enable_insecure: false
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..6df2197
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.example</groupId>
+  <artifactId>sonar-dummy</artifactId>
+  <version>1.0.0</version>
+</project>
diff --git a/src/Dummy.java b/src/Dummy.java
new file mode 100644
index 0000000..5e141bd
--- /dev/null
+++ b/src/Dummy.java
@@ -0,0 +1,5 @@
+public class Dummy {
+    public static void main(String[] args) {
+        System.out.println("Hello from Sonar test.");
+    }
+}
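
Note on the extraEnvVarsCM / extraEnvVarsSecret lists in the chart values above: in MOSIP charts that follow the common Bitnami template pattern, each listed ConfigMap and Secret is attached to the container as an envFrom source. The fragment below is a minimal sketch of what the rendered container spec would look like under that assumption; it is illustrative only and not produced by this diff.

envFrom:
  - configMapRef:
      name: global
  - configMapRef:
      name: db
  - configMapRef:
      name: mosipcertmanager
  - configMapRef:
      name: partner-ids-env
  - configMapRef:
      name: esignet-namespace
  - secretRef:
      name: postgres-postgresql
  - secretRef:
      name: keycloak-client-secrets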
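
Because every environment-specific setting in values.yaml (database host, partner IDs, schedule, image tag) can be overridden at install time, a deployer does not need to edit the chart itself. The sketch below shows a hypothetical override file and the standard Helm command that applies it; the file name, namespace, and values are examples only, and Helm deep-merges the override over the chart defaults, so only the keys being changed need to appear.

# values-override.yaml (hypothetical example)
crontime: "0 1 * * *"            # run the expiry check at 1 AM instead of the default 3 AM
image:
  repository: mosipqa/certmanager
  tag: develop
mosipcertmanager:
  configmaps:
    db:
      db-host: postgres.example.internal   # environment-specific PostgreSQL host
    mosipcertmanager:
      pre-expiry-days: '30'                # days-before-expiry threshold used by the job (illustrative)

# Install or upgrade with the override applied:
#   helm upgrade --install certmanager ./helm/mosipcertmanager -n certmanager -f values-override.yaml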