Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .config/make/tests.mak
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ test: core-deps test-deps ## Run all tests

.PHONY: e2e-server
e2e-server: ## Run e2e-server tests
sh $(shell pwd)/scripts/run.sh
bash $(shell pwd)/scripts/run.sh

.PHONY: probe-e2e
probe-e2e: ## Probe e2e tests
Expand Down
25 changes: 25 additions & 0 deletions .github/actions/cargo-build-jobs/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: "Configure Cargo build parallelism"
description: >
Sets CARGO_BUILD_JOBS from CPU and memory (up to half of logical cores, reduced
when RAM per rustc job would be too low). Linux, macOS, and Windows (Git Bash).

runs:
using: composite
steps:
- name: Compute CARGO_BUILD_JOBS
shell: bash
run: bash "${GITHUB_WORKSPACE}/scripts/ci/compute-cargo-build-jobs.sh"
9 changes: 9 additions & 0 deletions .github/actions/setup/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,18 @@ inputs:
required: false
default: ""

configure-cargo-jobs:
description: "Set CARGO_BUILD_JOBS from CPU and available memory"
required: false
default: "true"

runs:
using: "composite"
steps:
- name: Configure Cargo build parallelism
if: inputs.configure-cargo-jobs == 'true'
uses: ./.github/actions/cargo-build-jobs

- name: Install system dependencies (Ubuntu)
if: runner.os == 'Linux'
shell: bash
Expand Down
5 changes: 3 additions & 2 deletions .github/s3tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ The `s3tests.conf` file is based on the official `s3tests.conf.SAMPLE` from the

### Key Configuration Points

- **Host**: Set via `${S3_HOST}` environment variable (e.g., `rustfs-single` for single-node, `lb` for multi-node)
- **Port**: 9000 (standard RustFS port)
- **Host**: Set via `${S3_HOST}` environment variable (e.g., `127.0.0.1` on the runner, `rustfs-single` inside `rustfs-net`)
- **Port**: Set via `${S3_PORT}` (host port; use `9000` when publishing `:9000`, or an ephemeral port when avoiding conflicts on shared runners)
- **Credentials**: Uses `${S3_ACCESS_KEY}` and `${S3_SECRET_KEY}` from workflow environment
- **TLS**: Disabled (`is_secure = False`)

Expand Down Expand Up @@ -62,6 +62,7 @@ docker run -d --name rustfs-single \

# Generate config
export S3_HOST=rustfs-single
export S3_PORT=9000
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf

# Run tests
Expand Down
8 changes: 4 additions & 4 deletions .github/s3tests/s3tests.conf
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,17 @@
# Based on: https://github.com/ceph/s3-tests/blob/master/s3tests.conf.SAMPLE
#
# Usage:
# Single-node: S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf
# Multi-node: S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf
# export S3_HOST=127.0.0.1 S3_PORT=9000 # S3_PORT must match the published host port
# envsubst < s3tests.conf > /tmp/s3tests.conf

[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for RustFS - will be substituted via envsubst
host = ${S3_HOST}

# port for RustFS
port = 9000
# port for RustFS (host port; default 9000 — set S3_PORT when mapping e.g. 19001:9000)
port = ${S3_PORT}

## say "False" to disable TLS
is_secure = False
Expand Down
3 changes: 3 additions & 0 deletions .github/workflows/audit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6

- name: Configure Cargo build parallelism
uses: ./.github/actions/cargo-build-jobs

- name: Install cargo-audit
uses: taiki-e/install-action@v2
with:
Expand Down
12 changes: 10 additions & 2 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,6 @@ concurrency:
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
CARGO_BUILD_JOBS: 2

jobs:

Expand Down Expand Up @@ -153,7 +152,7 @@ jobs:
- name: Build debug binary
run: |
touch rustfs/build.rs
cargo build -p rustfs --bins --jobs 2
cargo build -p rustfs --bins

- name: Upload debug binary
uses: actions/upload-artifact@v6
Expand All @@ -173,6 +172,9 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6

- name: Configure Cargo build parallelism
uses: ./.github/actions/cargo-build-jobs

- name: Clean up previous test run
run: |
rm -rf /tmp/rustfs
Expand All @@ -187,6 +189,11 @@ jobs:
- name: Make binary executable
run: chmod +x ./target/debug/rustfs

- name: Install native build dependencies for s3s-e2e
run: |
sudo apt-get update -qq
sudo apt-get install -y build-essential cmake pkg-config

- name: Setup Rust toolchain for s3s-e2e installation
uses: dtolnay/rust-toolchain@stable

Expand Down Expand Up @@ -231,6 +238,7 @@ jobs:

- name: Run implemented s3-tests
run: |
export S3_PORT="$(python3 -c "import socket; s=socket.socket(); s.bind(('127.0.0.1',0)); print(s.getsockname()[1]); s.close()")"
DEPLOY_MODE=binary \
RUSTFS_BINARY=./target/debug/rustfs \
TEST_MODE=single \
Expand Down
31 changes: 20 additions & 11 deletions .github/workflows/e2e-s3tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,11 @@ jobs:

- name: Start single RustFS
run: |
S3_PORT=$(python3 -c "import socket; s=socket.socket(); s.bind(('127.0.0.1',0)); print(s.getsockname()[1]); s.close()")
echo "S3_PORT=${S3_PORT}" >> "$GITHUB_ENV"
docker run -d --name rustfs-single \
--network rustfs-net \
-p 9000:9000 \
-p "${S3_PORT}:9000" \
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
-e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
-e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
Expand All @@ -114,7 +116,7 @@ jobs:
- name: Wait for RustFS ready
run: |
for i in {1..60}; do
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
if curl -sf "http://127.0.0.1:${S3_PORT}/health" >/dev/null 2>&1; then
echo "RustFS is ready"
exit 0
fi
Expand All @@ -135,6 +137,7 @@ jobs:
- name: Generate s3tests config
run: |
export S3_HOST=127.0.0.1
export S3_PORT="${S3_PORT}"
envsubst < .github/s3tests/s3tests.conf > s3tests.conf

- name: Provision s3-tests alt user (required by suite)
Expand All @@ -148,7 +151,7 @@ jobs:
-X PUT \
-H 'Content-Type: application/json' \
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
"http://127.0.0.1:${S3_PORT}/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"

# Explicitly attach built-in policy via policy mapping.
# s3-tests relies on alt client being able to ListBuckets during setup cleanup.
Expand All @@ -158,7 +161,7 @@ jobs:
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
"http://127.0.0.1:${S3_PORT}/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"

# Sanity check: alt user can list buckets (should not be AccessDenied).
awscurl \
Expand All @@ -167,7 +170,7 @@ jobs:
--access_key "${S3_ALT_ACCESS_KEY}" \
--secret_key "${S3_ALT_SECRET_KEY}" \
-X GET \
"http://127.0.0.1:9000/" >/dev/null
"http://127.0.0.1:${S3_PORT}/" >/dev/null

- name: Prepare s3-tests
run: |
Expand Down Expand Up @@ -255,9 +258,14 @@ jobs:
-t rustfs-ci \
-f Dockerfile.source .

- name: Pick S3 host port for load balancer
run: |
S3_PORT=$(python3 -c "import socket; s=socket.socket(); s.bind(('127.0.0.1',0)); print(s.getsockname()[1]); s.close()")
echo "S3_PORT=${S3_PORT}" >> "$GITHUB_ENV"

- name: Prepare cluster compose
run: |
cat > compose.yml <<'EOF'
cat > compose.yml <<EOF
services:
rustfs1:
image: rustfs-ci
Expand Down Expand Up @@ -308,7 +316,7 @@ jobs:
hostname: lb
networks: [rustfs-net]
ports:
- "9000:9000"
- "${S3_PORT}:9000"
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
networks:
Expand Down Expand Up @@ -346,7 +354,7 @@ jobs:
- name: Wait for LB ready
run: |
for i in {1..90}; do
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
if curl -sf "http://127.0.0.1:${S3_PORT}/health" >/dev/null 2>&1; then
echo "Load balancer is ready"
exit 0
fi
Expand All @@ -359,6 +367,7 @@ jobs:
- name: Generate s3tests config
run: |
export S3_HOST=127.0.0.1
export S3_PORT="${S3_PORT}"
envsubst < .github/s3tests/s3tests.conf > s3tests.conf

- name: Provision s3-tests alt user (required by suite)
Expand All @@ -371,23 +380,23 @@ jobs:
-X PUT \
-H 'Content-Type: application/json' \
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
"http://127.0.0.1:${S3_PORT}/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"

awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
"http://127.0.0.1:${S3_PORT}/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"

awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ALT_ACCESS_KEY}" \
--secret_key "${S3_ALT_SECRET_KEY}" \
-X GET \
"http://127.0.0.1:9000/" >/dev/null
"http://127.0.0.1:${S3_PORT}/" >/dev/null

- name: Prepare s3-tests
run: |
Expand Down
7 changes: 5 additions & 2 deletions .github/workflows/nix.yml
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6

- name: Configure Cargo / Nix build parallelism
uses: ./.github/actions/cargo-build-jobs

- name: Install Nix
uses: cachix/install-nix-action@v31
with:
Expand All @@ -65,12 +68,12 @@ jobs:
run: |
echo "Checking flake structure and evaluation..."
nix flake show
nix flake check --print-build-logs
nix flake check --max-jobs "$CARGO_BUILD_JOBS" --print-build-logs

- name: Build RustFS
run: |
echo "Building the default package..."
nix build .#default --print-build-logs
nix build .#default --max-jobs "$CARGO_BUILD_JOBS" --print-build-logs

- name: Test Binary
run: |
Expand Down
16 changes: 16 additions & 0 deletions build-rustfs.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,20 @@

set -e

# Default rustc parallelism when not set (e.g. invoking this script without make).
ensure_cargo_build_jobs() {
    # An explicit caller-provided value always wins.
    if [[ -n "${CARGO_BUILD_JOBS:-}" ]]; then
        return 0
    fi

    local repo_root compute
    repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    compute="${repo_root}/scripts/ci/compute-cargo-build-jobs.sh"

    # Derive a value from the CI helper when present; fall back to 2 jobs
    # if the helper errors out. Missing helper leaves the variable unset.
    if [[ -f "$compute" ]]; then
        CARGO_BUILD_JOBS="$(bash "$compute" --value-only 2>/dev/null || echo 2)"
        export CARGO_BUILD_JOBS
    fi
}

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
Expand Down Expand Up @@ -588,6 +602,8 @@ main() {
exit 1
fi

ensure_cargo_build_jobs

# Override platform if specified
if [ -n "$CUSTOM_PLATFORM" ]; then
PLATFORM="$CUSTOM_PLATFORM"
Expand Down
35 changes: 33 additions & 2 deletions crates/e2e_test/src/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,15 @@
//! - AWS S3 client creation and configuration
//! - Basic health checks and server readiness detection
//! - Common test constants and utilities
//!
//! ## Shared self-hosted runners
//!
//! - `RUSTFS_E2E_EXTERNAL_ADDR`: `host:port` for tests that expect a **pre-started** RustFS (policy
//! suite, `scripts/run_e2e_tests.sh`). Default `127.0.0.1:9000`.
//! - `RUSTFS_E2E_EXTERNAL_URL`: optional full base URL (`http://host:port`) for reliant tests;
//! default is `http://` + [`external_rustfs_socket_addr`].
//! - `RUSTFS_E2E_KILL_EXISTING`: set to `1`/`true`/`yes` to run `pkill` before spawning test
//! servers. **Off by default** so a long-lived RustFS on `:9000` is not killed.

use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::{Client, Config};
Expand All @@ -37,6 +46,23 @@ use uuid::Uuid;
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
pub const TEST_BUCKET: &str = "e2e-test-bucket";

/// `host:port` of a RustFS instance started outside the test process.
///
/// Reads `RUSTFS_E2E_EXTERNAL_ADDR`; falls back to `127.0.0.1:9000` when the
/// variable is unset (or holds non-UTF-8 data).
pub fn external_rustfs_socket_addr() -> String {
    match std::env::var("RUSTFS_E2E_EXTERNAL_ADDR") {
        Ok(addr) => addr,
        Err(_) => String::from("127.0.0.1:9000"),
    }
}

/// Base HTTP URL for ignored "reliant" tests that target an external RustFS.
///
/// `RUSTFS_E2E_EXTERNAL_URL` wins when set; otherwise the URL is derived from
/// [`external_rustfs_socket_addr`] by prefixing the `http://` scheme.
pub fn external_rustfs_http_url() -> String {
    if let Ok(url) = std::env::var("RUSTFS_E2E_EXTERNAL_URL") {
        url
    } else {
        format!("http://{}", external_rustfs_socket_addr())
    }
}

/// Whether e2e setup may `pkill` existing RustFS processes before spawning
/// test servers. Off by default so a long-lived RustFS is never killed.
///
/// Controlled by `RUSTFS_E2E_KILL_EXISTING`. Matching is case-insensitive and
/// ignores surrounding whitespace, so `1`, `true`, `True`, `YES`, ` yes ` all
/// enable it. (Previously only the exact spellings `1`/`true`/`TRUE`/`yes`/`YES`
/// were recognized, so e.g. `True` was silently ignored — a footgun given the
/// module docs advertise "1/true/yes".) All previously accepted values remain
/// accepted; any other value, or an unset/non-UTF-8 variable, disables it.
fn kill_existing_rustfs_processes_enabled() -> bool {
    std::env::var("RUSTFS_E2E_KILL_EXISTING")
        .map(|v| matches!(v.trim().to_ascii_lowercase().as_str(), "1" | "true" | "yes"))
        .unwrap_or(false)
}
pub fn workspace_root() -> PathBuf {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.pop(); // e2e_test
Expand Down Expand Up @@ -181,9 +207,14 @@ impl RustFSTestEnvironment {
Ok(port)
}

/// Kill any existing RustFS processes
/// Kill any existing RustFS processes (only if `RUSTFS_E2E_KILL_EXISTING` is set).
pub async fn cleanup_existing_processes(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
info!("Cleaning up any existing RustFS processes");
if !kill_existing_rustfs_processes_enabled() {
info!("Skipping pkill of RustFS (set RUSTFS_E2E_KILL_EXISTING=1 to enable)");
return Ok(());
}

info!("Cleaning up any existing RustFS processes (RUSTFS_E2E_KILL_EXISTING is set)");
let binary_path = rustfs_binary_path();
let binary_name = binary_path.to_string_lossy();
let output = Command::new("pkill").args(["-f", &binary_name]).output();
Expand Down
Loading
Loading