diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index c004dd098..8cf892a2b 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -51,6 +51,13 @@ functions: ### Setup Functions ### + setup_jq: &setup_jq + command: subprocess.exec + type: setup + params: + working_dir: src/github.com/mongodb/mongodb-kubernetes + binary: scripts/evergreen/setup_jq.sh + setup_context: &setup_context # Running the first switch is important to fill the workdir and other important initial env vars command: shell.exec type: setup @@ -103,6 +110,7 @@ functions: type: setup params: command: "git config --global user.email 'kubernetes-hosted-team@mongodb.com'" + - *setup_jq # we need jq in the context - *setup_context setup_kubectl: &setup_kubectl @@ -112,13 +120,6 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes binary: scripts/evergreen/setup_kubectl.sh - setup_jq: &setup_jq - command: subprocess.exec - type: setup - params: - working_dir: src/github.com/mongodb/mongodb-kubernetes - binary: scripts/evergreen/setup_jq.sh - setup_shellcheck: command: subprocess.exec type: setup @@ -261,6 +262,18 @@ functions: - *setup_evg_host - *python_venv + # This differs from the normal evg_host setup as we require minikube instead of kind on + # IBM machines and install the AWS CLI via pip instead + setup_building_host_minikube: + - *switch_context + - command: subprocess.exec + type: setup + params: + working_dir: src/github.com/mongodb/mongodb-kubernetes + add_to_path: + - ${workdir}/bin + command: scripts/evergreen/setup_minikube_host.sh + prune_docker_resources: - command: subprocess.exec type: setup @@ -328,7 +341,7 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes script: | source .generated/context.export.env - scripts/evergreen/e2e/setup_cloud_qa.py create + scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create # The additional switch is needed, since we now have created the needed OM exports.
- *switch_context diff --git a/.evergreen.yml b/.evergreen.yml index 209bf152a..136212703 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -77,6 +77,12 @@ variables: - func: download_kube_tools - func: setup_building_host + - &setup_group_ibm + setup_group_can_fail_task: true + setup_group: + - func: clone + - func: setup_building_host_minikube + - &setup_group_multi_cluster setup_group_can_fail_task: true setup_group: @@ -134,7 +140,7 @@ variables: - &teardown_group teardown_group: - - func: prune_docker_resources + - func: teardown_kubernetes_environment - func: run_retry_script - &base_om7_dependency @@ -1188,6 +1194,14 @@ task_groups: - e2e_om_ops_manager_backup <<: *teardown_group + - name: e2e_smoke_ibm_task_group + max_hosts: -1 + <<: *setup_group_ibm + <<: *setup_and_teardown_task_cloudqa + tasks: + - e2e_replica_set + <<: *teardown_group + - name: e2e_ops_manager_kind_5_0_only_task_group max_hosts: -1 <<: *setup_group @@ -1453,6 +1467,30 @@ buildvariants: tasks: - name: e2e_smoke_task_group + - name: e2e_smoke_ibm_power + display_name: e2e_smoke_ibm_power + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - rhel9-power-large + allowed_requesters: [ "patch", "github_tag" ] +# depends_on: +# - name: build_test_image +# variant: init_test_run + tasks: + - name: e2e_smoke_ibm_task_group + + - name: e2e_smoke_ibm_z + display_name: e2e_smoke_ibm_z + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - rhel9-zseries-small + allowed_requesters: [ "patch", "github_tag" ] +# depends_on: +# - name: build_test_image +# variant: init_test_run + tasks: + - name: e2e_smoke_ibm_task_group + - name: e2e_static_smoke display_name: e2e_static_smoke tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index dfcb14f0b..a36896970 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -8,7 +8,34 @@ source scripts/funcs/checks source scripts/funcs/printing source scripts/funcs/kubernetes +# Detect available container runtime +detect_container_runtime() { + if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then + CONTAINER_RUNTIME="podman" + # Use root's auth.json since minikube uses sudo podman + CONFIG_PATH="/root/.config/containers/auth.json" + sudo mkdir -p "$(dirname "${CONFIG_PATH}")" + echo "Using Podman for container authentication (sudo mode)" + return 0 + elif command -v docker &> /dev/null; then + CONTAINER_RUNTIME="docker" + CONFIG_PATH="${HOME}/.docker/config.json" + mkdir -p "$(dirname "${CONFIG_PATH}")" + echo "Using Docker for container authentication" + return 0 + else + echo "Error: Neither Docker nor Podman is available" + exit 1 + fi +} + check_docker_daemon_is_running() { + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then + # Podman doesn't require a daemon + echo "Using Podman (no daemon required)" + return 0 + fi + if [[ "$(uname -s)" != "Linux" ]]; then echo "Skipping docker daemon check when not running in Linux" return 0 @@ -34,71 +61,131 @@ check_docker_daemon_is_running() { remove_element() { config_option="${1}" tmpfile=$(mktemp) - jq 'del(.'"${config_option}"')' ~/.docker/config.json >"${tmpfile}" - cp "${tmpfile}" ~/.docker/config.json + + # Initialize config file if it doesn't exist + if [[ ! 
-f "${CONFIG_PATH}" ]]; then + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null + else + echo '{}' > "${CONFIG_PATH}" + fi + fi + + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + sudo "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" + sudo cp "${tmpfile}" "${CONFIG_PATH}" + else + "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" + cp "${tmpfile}" "${CONFIG_PATH}" + fi rm "${tmpfile}" } -# This is the script which performs docker authentication to different registries that we use (so far ECR and RedHat) -# As the result of this login the ~/.docker/config.json will have all the 'auth' information necessary to work with docker registries +# Container runtime login wrapper +container_login() { + local username="$1" + local registry="$2" + + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then + sudo podman login --authfile "${CONFIG_PATH}" --username "${username}" --password-stdin "${registry}" + else + docker login --username "${username}" --password-stdin "${registry}" + fi +} + +# This is the script which performs container authentication to different registries that we use (so far ECR and RedHat) +# As the result of this login the config file will have all the 'auth' information necessary to work with container registries + +# Detect container runtime and set appropriate config path +detect_container_runtime check_docker_daemon_is_running -if [[ -f ~/.docker/config.json ]]; then +# Initialize config file if it doesn't exist +if [[ ! -f "${CONFIG_PATH}" ]]; then + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null + else + echo '{}' > "${CONFIG_PATH}" + fi +fi + +if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then - # Check if login is actually required by making a HEAD request to ECR using existing Docker config - echo "Checking if Docker credentials are valid..." - ecr_auth=$(jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' ~/.docker/config.json) + # Check if login is actually required by making a HEAD request to ECR using existing credentials + echo "Checking if container registry credentials are valid..." + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + ecr_auth=$(sudo "${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + else + ecr_auth=$("${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + fi if [[ -n "${ecr_auth}" ]]; then http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ -H "Authorization: Basic ${ecr_auth}" 2>/dev/null || echo "error/timeout") if [[ "${http_status}" != "401" && "${http_status}" != "403" && "${http_status}" != "error/timeout" ]]; then - echo "Docker credentials are up to date - not performing the new login!" + echo "Container registry credentials are up to date - not performing the new login!" 
exit fi - echo "Docker login required (HTTP status: ${http_status})" + echo "Container login required (HTTP status: ${http_status})" else - echo "No ECR credentials found in Docker config - login required" + echo "No ECR credentials found in container config - login required" fi fi - title "Performing docker login to ECR registries" + title "Performing container login to ECR registries" - # There could be some leftovers on Evergreen - if grep -q "credsStore" ~/.docker/config.json; then - remove_element "credsStore" - fi - if grep -q "credHelpers" ~/.docker/config.json; then - remove_element "credHelpers" + # There could be some leftovers on Evergreen (Docker-specific, skip for Podman) + if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + if sudo grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if sudo grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi + else + if grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi + fi fi fi echo "$(aws --version)}" -aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com +aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" # by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json # We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element -if grep -q "credsStore" ~/.docker/config.json; then +# This is Docker-specific behavior, Podman stores credentials directly in auth.json +if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && (([[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]] && sudo grep -q "credsStore" "${CONFIG_PATH}") || ([[ "${CONFIG_PATH}" != "/root/.config/containers/auth.json" ]] && grep -q "credsStore" "${CONFIG_PATH}")); then remove_element "credsStore" # login again to store the credentials into the config.json - aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com + aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" fi -aws ecr get-login-password --region "eu-west-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.eu-west-1.amazonaws.com +aws ecr get-login-password --region "eu-west-1" | container_login "AWS" "268558157000.dkr.ecr.eu-west-1.amazonaws.com" if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then # log in to quay.io for the mongodb/mongodb-search-community private repo # TODO remove once we switch to the official repo in Public Preview quay_io_auth_file=$(mktemp) - docker_configjson_tmp=$(mktemp) + config_tmp=$(mktemp) echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" - jq -s '.[0] * .[1]' "${quay_io_auth_file}" ~/.docker/config.json > "${docker_configjson_tmp}" - mv "${docker_configjson_tmp}" ~/.docker/config.json + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + sudo jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + sudo mv "${config_tmp}" "${CONFIG_PATH}" + else + jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > 
"${config_tmp}" + mv "${config_tmp}" "${CONFIG_PATH}" + fi rm "${quay_io_auth_file}" fi diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power new file mode 100644 index 000000000..4ba998050 --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5 +export CUSTOM_MDB_PREV_VERSION=5.0.7 +export KUBE_ENVIRONMENT_NAME=minikube diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z new file mode 100644 index 000000000..4ba998050 --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5 +export CUSTOM_MDB_PREV_VERSION=5.0.7 +export KUBE_ENVIRONMENT_NAME=minikube diff --git a/scripts/dev/contexts/evg-private-context b/scripts/dev/contexts/evg-private-context index 8f25842e8..8c1dfd672 100644 --- a/scripts/dev/contexts/evg-private-context +++ b/scripts/dev/contexts/evg-private-context @@ -122,3 +122,11 @@ export cognito_workload_url="${cognito_workload_url}" export cognito_workload_user_id="${cognito_workload_user_id}" export MDB_UPDATE_LICENSES=true + +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index fb1f9ab8f..bd8b411db 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -4,7 +4,56 @@ set -Eeou pipefail -source scripts/dev/set_env_context.sh +ensure_required_python() { + local required_version="${PYTHON_VERSION:-3.10}" + local major_minor + major_minor=$(echo "${required_version}" | grep -oE '^[0-9]+\.[0-9]+') + + echo "Checking for Python ${required_version} (${major_minor}.x)..." 
>&2 + + # Check if current python matches required version + if command -v python3 &> /dev/null; then + local version + if version=$(python3 --version 2>&1) && [[ "${version}" == *"Python ${major_minor}"* ]]; then + echo "Found Python ${major_minor}: ${version}" >&2 + echo "python3" + return 0 + else + echo "Current python3 version: ${version}" >&2 + fi + fi + + # Try to install required Python version using pyenv if available + if command -v pyenv &> /dev/null; then + echo "Python ${major_minor} not found. Attempting to install via pyenv..." >&2 + + # Check if any version in the required series is already installed + if pyenv versions --bare | grep -q "^${major_minor}\."; then + local installed_version + installed_version=$(pyenv versions --bare | grep "^${major_minor}\." | head -1) + echo "Found existing pyenv Python ${major_minor}: ${installed_version}" >&2 + pyenv global "${installed_version}" + echo "python3" + return 0 + fi + + # Install latest version in the required series + local latest_version + latest_version=$(pyenv install --list | grep -E "^[[:space:]]*${major_minor}\.[0-9]+$" | tail -1 | xargs) + if [[ -n "${latest_version}" ]]; then + echo "Installing Python ${latest_version} via pyenv..." >&2 + if pyenv install "${latest_version}"; then + pyenv global "${latest_version}" + echo "python3" + return 0 + fi + fi + fi + + echo "Error: No suitable Python ${major_minor} installation found and unable to install via pyenv." >&2 + echo "Please ensure Python ${major_minor} is installed or pyenv is available." >&2 + return 1 +} if [[ -d "${PROJECT_DIR}"/venv ]]; then echo "Removing venv..." @@ -12,17 +61,15 @@ if [[ -d "${PROJECT_DIR}"/venv ]]; then rm -rf "venv" fi -# in our EVG hosts, python versions are always in /opt/python -python_bin="/opt/python/${PYTHON_VERSION}/bin/python3" -if [[ "$(uname)" == "Darwin" ]]; then - python_bin="python${PYTHON_VERSION}" -fi +# Ensure required Python version is available +python_bin=$(ensure_required_python) echo "Using python from the following path: ${python_bin}" "${python_bin}" -m venv venv source venv/bin/activate pip install --upgrade pip +echo "Installing requirements.txt..." pip install -r requirements.txt echo "Python venv was recreated successfully." echo "Current python path: $(which python)" diff --git a/scripts/evergreen/setup_aws.sh b/scripts/evergreen/setup_aws.sh index 931eb0a36..5563a8a30 100755 --- a/scripts/evergreen/setup_aws.sh +++ b/scripts/evergreen/setup_aws.sh @@ -3,16 +3,116 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh -INSTALL_DIR="${workdir:?}/.local/lib/aws" -BIN_LOCATION="${workdir}/bin" +# Detect system architecture +detect_architecture() { + local arch + arch=$(uname -m) + echo "Detected architecture: ${arch}" >&2 + echo "${arch}" +} -mkdir -p "${BIN_LOCATION}" +# Install AWS CLI v2 via binary download (for x86_64 and aarch64) +install_aws_cli_binary() { + local arch="$1" + echo "Installing AWS CLI v2 via binary download for ${arch}..." 
-tmpdir=$(mktemp -d) -cd "${tmpdir}" + # Map architecture names for AWS CLI download URLs + local aws_arch + case "${arch}" in + x86_64) + aws_arch="x86_64" + ;; + aarch64|arm64) + aws_arch="aarch64" + ;; + *) + echo "Error: Unsupported architecture for binary installation: ${arch}" >&2 + return 1 + ;; + esac -curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" -unzip awscliv2.zip &> /dev/null + # Download and install AWS CLI v2 + local temp_dir + temp_dir=$(mktemp -d) + cd "${temp_dir}" + + echo "Downloading AWS CLI v2 for ${aws_arch}..." + curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${aws_arch}.zip" -o "awscliv2.zip" + + unzip -q awscliv2.zip + sudo ./aws/install --update + + # Clean up + cd - > /dev/null + rm -rf "${temp_dir}" + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v2 installed successfully:" + aws --version + else + echo "Error: AWS CLI v2 installation failed" >&2 + return 1 + fi +} + +# Install AWS CLI v1 via pip (for IBM architectures: ppc64le, s390x) +install_aws_cli_pip() { + echo "Installing AWS CLI v1 via pip (for IBM architectures)..." + + # Ensure pip is available + if ! command -v pip3 &> /dev/null && ! command -v pip &> /dev/null; then + echo "Error: pip is not available. Please install Python and pip first." >&2 + return 1 + fi + + # Use pip3 if available, otherwise pip + local pip_cmd="pip3" + if ! command -v pip3 &> /dev/null; then + pip_cmd="pip" + fi + + echo "Installing AWS CLI using ${pip_cmd}..." + ${pip_cmd} install --user awscli + + # Add ~/.local/bin to PATH if not already there (where pip --user installs) + if [[ ":$PATH:" != *":$HOME/.local/bin:"* ]]; then + export PATH="$HOME/.local/bin:$PATH" + echo "Added ~/.local/bin to PATH" + fi + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v1 installed successfully:" + aws --version + else + echo "Error: AWS CLI v1 installation failed or not found in PATH" >&2 + return 1 + fi +} + +# Main installation logic +install_aws_cli() { + local arch + arch=$(detect_architecture) + + case "${arch}" in + ppc64le|s390x) + echo "IBM architecture detected (${arch}). Using pip installation..." + install_aws_cli_pip + ;; + x86_64|aarch64|arm64) + echo "Standard architecture detected (${arch}). Using binary installation..." + install_aws_cli_binary "${arch}" + ;; + *) + echo "Warning: Unknown architecture ${arch}. Falling back to pip installation..." + install_aws_cli_pip + ;; + esac +} + +install_aws_cli docker_dir="/home/${USER}/.docker" if [[ ! -d "${docker_dir}" ]]; then @@ -21,7 +121,5 @@ fi sudo chown "${USER}":"${USER}" "${docker_dir}" -R sudo chmod g+rwx "${docker_dir}" -R -sudo ./aws/install --bin-dir "${BIN_LOCATION}" --install-dir "${INSTALL_DIR}" --update -cd - -rm -rf "${tmpdir}" +echo "AWS CLI setup completed successfully." 
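setup_aws.sh above and setup_jq.sh, setup_kubectl.sh and install-minikube.sh below all repeat the same uname -m dispatch, each with its own artifact-naming quirks: jq publishes ppc64el, kubectl and helm keep the kernel names for ppc64le/s390x but use amd64/arm64 otherwise, and AWS CLI v2 only ships Linux binaries for x86_64 and aarch64, hence the pip fallback. A minimal standalone sketch of that pattern, assuming a hypothetical map_arch helper that is not part of this change:

#!/usr/bin/env bash
# Sketch only: map the kernel architecture to the artifact suffix a given tool publishes.
set -Eeou pipefail

map_arch() {
  local tool="$1" arch
  arch=$(uname -m)
  case "${tool}:${arch}" in
    jq:x86_64) echo "amd64" ;;
    jq:aarch64|jq:arm64) echo "arm64" ;;
    jq:ppc64le) echo "ppc64el" ;;                # jq uses the Debian-style name
    jq:s390x) echo "s390x" ;;
    kube:x86_64) echo "amd64" ;;
    kube:aarch64|kube:arm64) echo "arm64" ;;
    kube:ppc64le|kube:s390x) echo "${arch}" ;;   # kubectl/helm keep the kernel name
    aws:x86_64|aws:aarch64) echo "${arch}" ;;    # AWS CLI v2 ships only these two
    aws:*) echo "pip" ;;                         # anything else falls back to 'pip install awscli'
    *) echo "unsupported: ${tool} on ${arch}" >&2; return 1 ;;
  esac
}

# Example: assemble the jq download URL for the current machine.
jq_arch=$(map_arch jq)
echo "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}"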
diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index e21d4a07e..8905d4d46 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -7,7 +7,34 @@ set -Eeou pipefail -source scripts/dev/set_env_context.sh source scripts/funcs/install -download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64" +# Detect and map architecture for jq releases +detect_jq_architecture() { + local arch + arch=$(uname -m) + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "ppc64el" # jq uses ppc64el instead of ppc64le + ;; + s390x) + echo "s390x" + ;; + *) + echo "Error: Unsupported architecture for jq: ${arch}" >&2 + exit 1 + ;; + esac +} + +jq_arch=$(detect_jq_architecture) +echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}" + +download_and_install_binary "${PROJECT_DIR:-${workdir:-.}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" diff --git a/scripts/evergreen/setup_kubectl.sh b/scripts/evergreen/setup_kubectl.sh index ab9066ac1..00cf975fd 100755 --- a/scripts/evergreen/setup_kubectl.sh +++ b/scripts/evergreen/setup_kubectl.sh @@ -3,22 +3,52 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +# Detect system architecture and map to kubectl/helm architecture names +detect_architecture() { + local arch + arch=$(uname -m) + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "ppc64le" + ;; + s390x) + echo "s390x" + ;; + *) + echo "Unsupported architecture: ${arch}" >&2 + echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2 + exit 1 + ;; + esac +} + +# Detect the current architecture +ARCH=$(detect_architecture) +echo "Detected architecture: ${ARCH}" + bindir="${PROJECT_DIR}/bin" tmpdir="${PROJECT_DIR}/tmp" mkdir -p "${bindir}" "${tmpdir}" -echo "Downloading latest kubectl" -curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +echo "Downloading latest kubectl for ${ARCH}" +curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" chmod +x kubectl echo "kubectl version --client" ./kubectl version --client mv kubectl "${bindir}" -echo "Downloading helm" +echo "Downloading helm for ${ARCH}" helm_archive="${tmpdir}/helm.tgz" helm_version="v3.17.1" -curl -s https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz --output "${helm_archive}" +curl -s https://get.helm.sh/helm-${helm_version}-linux-${ARCH}.tar.gz --output "${helm_archive}" tar xfz "${helm_archive}" -C "${tmpdir}" &> /dev/null -mv "${tmpdir}/linux-amd64/helm" "${bindir}" +mv "${tmpdir}/linux-${ARCH}/helm" "${bindir}" "${bindir}"/helm version diff --git a/scripts/evergreen/setup_kubernetes_environment.sh b/scripts/evergreen/setup_kubernetes_environment.sh index 707231c9f..6edaad50d 100755 --- a/scripts/evergreen/setup_kubernetes_environment.sh +++ b/scripts/evergreen/setup_kubernetes_environment.sh @@ -30,6 +30,8 @@ elif [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ] || [ "${KUBE_ENVIRONMENT_NAME}" = " scripts/dev/recreate_kind_cluster.sh "kind" elif [[ "${KUBE_ENVIRONMENT_NAME}" = "multi" && "${CLUSTER_TYPE}" == "kind" ]]; then scripts/dev/recreate_kind_clusters.sh +elif [[ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]]; then + echo "Nothing to do for minikube" else echo "KUBE_ENVIRONMENT_NAME not 
recognized" echo "value is <<${KUBE_ENVIRONMENT_NAME}>>. If empty it means it was not set" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh new file mode 100755 index 000000000..f4f5d643b --- /dev/null +++ b/scripts/evergreen/setup_minikube_host.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Consolidated setup script for minikube host with multi-architecture support +# This script groups all the setup steps needed for IBM machines and other architectures +# Can be run on static hosts for testing and verification + +source scripts/dev/set_env_context.sh +set -Eeoux pipefail + +echo "==========================================" +echo "Setting up minikube host with multi-architecture support" +echo "Architecture: $(uname -m)" +echo "OS: $(uname -s)" +echo "==========================================" + +# Function to run a setup step with error handling and logging +run_setup_step() { + local step_name="$1" + shift + local script_command=("$@") + + echo "" + echo ">>> Running: ${step_name}" + echo ">>> Command: ${script_command[*]}" + + local script_path="${script_command[0]}" + if [[ -f "${script_path}" ]]; then + if "${script_command[@]}"; then + echo "✅ ${step_name} completed successfully" + else + echo "❌ ${step_name} failed" + exit 1 + fi + else + echo "❌ Script not found: ${script_path}" + exit 1 + fi +} + +# Setup Python environment (needed for AWS CLI pip installation) + +export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 +run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" + +run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" + +run_setup_step "kubectl and helm Setup" "scripts/evergreen/setup_kubectl.sh" + +run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" + +run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" + +run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh" + +# The minikube cluster is already started by the setup_minikube_host.sh script +echo "" +echo ">>> Minikube cluster startup completed by setup_minikube_host.sh" +echo "✅ Minikube cluster is ready for use" + +echo "" +echo "==========================================" +echo "✅ Minikube host setup completed successfully!" +echo "==========================================" +echo "" +echo "Installed tools summary:" +echo "- Python: $(python --version 2>/dev/null || python3 --version 2>/dev/null || echo 'Not found')" +echo "- AWS CLI: $(aws --version 2>/dev/null || echo 'Not found')" +echo "- kubectl: $(kubectl version --client 2>/dev/null || echo 'Not found')" +echo "- helm: $(helm version --short 2>/dev/null || echo 'Not found')" +echo "- jq: $(jq --version 2>/dev/null || echo 'Not found')" +echo "- Container Runtime: $(command -v podman &>/dev/null && echo "Podman $(podman --version 2>/dev/null)" || command -v docker &>/dev/null && echo "Docker $(docker --version 2>/dev/null)" || echo "Not found")" +echo "- Minikube: $(./bin/minikube version --short 2>/dev/null || echo 'Not found')" +echo "" +echo "Setup complete! Host is ready for minikube operations." 
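run_setup_step above is a fail-fast wrapper: it checks that the target script exists, runs it with any extra arguments, and aborts the whole host setup on the first failure. A small usage sketch for exercising the wrapper outside Evergreen; the throwaway step and its /tmp path are hypothetical and not part of this change:

# Sketch only: paste next to the run_setup_step definition (or source the script above) and run on any host.
# Create a hypothetical no-op step to show the wrapper's success path.
cat > /tmp/noop_step.sh <<'EOF'
#!/usr/bin/env bash
echo "no-op step ran on $(uname -m) with args: $*"
EOF
chmod +x /tmp/noop_step.sh

run_setup_step "No-op sanity step" /tmp/noop_step.sh --dry-run

# A missing path is reported as "Script not found" and exits with code 1, which is
# what makes the Evergreen task fail early when one of the tool scripts is absent:
# run_setup_step "Broken step" scripts/does/not/exist.sh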
diff --git a/scripts/evergreen/teardown_kubernetes_environment.sh b/scripts/evergreen/teardown_kubernetes_environment.sh index e5e2bd869..a9babfbed 100755 --- a/scripts/evergreen/teardown_kubernetes_environment.sh +++ b/scripts/evergreen/teardown_kubernetes_environment.sh @@ -5,6 +5,12 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh if [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ]; then + docker system prune -a -f echo "Deleting Kind cluster" kind delete clusters --all fi + +if [ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]; then + echo "Deleting minikube cluster" + "${PROJECT_DIR:-.}/bin/minikube" delete +fi diff --git a/scripts/funcs/kubernetes b/scripts/funcs/kubernetes index 5377d8927..11250422d 100644 --- a/scripts/funcs/kubernetes +++ b/scripts/funcs/kubernetes @@ -98,15 +98,40 @@ create_image_registries_secret() { context=$1 namespace=$2 secret_name=$3 + + # Detect the correct config file path based on container runtime + local config_file + local temp_config_file="" + if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then + # For Podman, use root's auth.json since minikube uses sudo podman + config_file="/root/.config/containers/auth.json" + echo "Using Podman config: ${config_file}" + + # Create a temporary copy that the current user can read + temp_config_file=$(mktemp) + sudo cp "${config_file}" "${temp_config_file}" + sudo chown "$(whoami):$(whoami)" "${temp_config_file}" + config_file="${temp_config_file}" + else + # For Docker, use standard docker config + config_file="${HOME}/.docker/config.json" + echo "Using Docker config: ${config_file}" + fi + # shellcheck disable=SC2154 if kubectl --context "${context}" get namespace "${namespace}"; then kubectl --context "${context}" -n "${namespace}" delete secret "${secret_name}" --ignore-not-found echo "${context}: Creating ${namespace}/${secret_name} pull secret" kubectl --context "${context}" -n "${namespace}" create secret generic "${secret_name}" \ - --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson + --from-file=.dockerconfigjson="${config_file}" --type=kubernetes.io/dockerconfigjson else echo "Skipping creating pull secret in ${context}/${namespace}. The namespace doesn't exist yet." fi + + # Clean up temporary file + if [[ -n "${temp_config_file}" ]] && [[ -f "${temp_config_file}" ]]; then + rm -f "${temp_config_file}" + fi } echo "Creating/updating pull secret from docker configured file" diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh new file mode 100755 index 000000000..527b27543 --- /dev/null +++ b/scripts/minikube/install-minikube.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +set -Eeou pipefail + +source scripts/dev/set_env_context.sh +source scripts/funcs/install + +# Detect architecture +ARCH=$(uname -m) +case "${ARCH}" in + x86_64) + MINIKUBE_ARCH="amd64" + ;; + aarch64) + MINIKUBE_ARCH="arm64" + ;; + ppc64le) + MINIKUBE_ARCH="ppc64le" + ;; + s390x) + MINIKUBE_ARCH="s390x" + ;; + *) + echo "Error: Unsupported architecture: ${ARCH}" + echo "Supported architectures: x86_64, aarch64, ppc64le, s390x" + exit 1 + ;; +esac + +echo "Installing minikube on ${ARCH} architecture..." + +# Install crictl (container runtime CLI) +echo "Installing crictl for ${ARCH}..." 
+CRICTL_VERSION=$(curl -s https://api.github.com/repos/kubernetes-sigs/cri-tools/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + +# Download and extract crictl tar.gz +mkdir -p "${PROJECT_DIR:-.}/bin" +CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${MINIKUBE_ARCH}.tar.gz" +echo "Downloading ${CRICTL_URL}" +TEMP_DIR=$(mktemp -d) +curl --retry 3 --silent -L "${CRICTL_URL}" -o "${TEMP_DIR}/crictl.tar.gz" +tar -xzf "${TEMP_DIR}/crictl.tar.gz" -C "${TEMP_DIR}/" +chmod +x "${TEMP_DIR}/crictl" +mv "${TEMP_DIR}/crictl" "${PROJECT_DIR:-.}/bin/crictl" +rm -rf "${TEMP_DIR}" +echo "Installed crictl to ${PROJECT_DIR:-.}/bin" + +# Also install crictl system-wide so minikube can find it +echo "Installing crictl system-wide..." +if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then + # Install to both /usr/local/bin and /usr/bin for better PATH coverage + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl + sudo chmod +x /usr/local/bin/crictl + sudo chmod +x /usr/bin/crictl + echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" + + # Verify installation + if command -v crictl >/dev/null 2>&1; then + echo "✅ crictl is now available in PATH: $(which crictl)" + echo "✅ crictl version: $(crictl --version 2>/dev/null || echo 'version check failed')" + else + echo "⚠️ crictl installed but not found in PATH" + fi +else + echo "⚠️ crictl not found in project bin, minikube may have issues" +fi + +# Install minikube +echo "Installing minikube for ${ARCH}..." +MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + +# Download minikube for detected architecture +download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${MINIKUBE_ARCH}" + +echo "Crictl ${CRICTL_VERSION} and Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh new file mode 100755 index 000000000..09c5b80f7 --- /dev/null +++ b/scripts/minikube/minikube_host.sh @@ -0,0 +1,212 @@ +#!/usr/bin/env bash + +# This is a helper script for running tests on s390x hosts. +# It allows you to configure minikube clusters and expose the remote API server on a local machine to +# enable local development while the minikube cluster runs on an s390x instance. +# Run "minikube_host.sh help" command to see the full usage. +# Similar to evg_host.sh but uses minikube instead of kind. + +set -Eeou pipefail + +test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x + +source scripts/dev/set_env_context.sh +source scripts/funcs/printing + +if [[ -z "${S390_HOST_NAME}" ]]; then + echo "S390_HOST_NAME env var is missing" + echo "Set it to your s390x host connection string (e.g., user@hostname)" + exit 1 +fi + +get_host_url() { + echo "${S390_HOST_NAME}" +} + +cmd=${1-""} + +if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then + host_url=$(get_host_url) +fi + +kubeconfig_path="${HOME}/.operator-dev/s390-host.kubeconfig" + +configure() { + ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" + if [[ -f "${HOME}/.docker/config.json" ]]; then + echo "Copying local ~/.docker/config.json authorization credentials to s390x host" + jq '.
| with_entries(select(.key == "auths"))' "${HOME}/.docker/config.json" | ssh -T -q "${host_url}" 'cat > ~/.docker/config.json' + fi + + sync + + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh " +} + +sync() { + rsync --verbose --archive --compress --human-readable --recursive --progress \ + --delete --delete-excluded --max-size=1000000 --prune-empty-dirs \ + -e ssh \ + --include-from=.rsyncinclude \ + --exclude-from=.gitignore \ + --exclude-from=.rsyncignore \ + ./ "${host_url}:~/mongodb-kubernetes/" + + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + ~/.operator-dev/ \ + "${host_url}:~/.operator-dev" & + + wait +} + +remote-prepare-local-e2e-run() { + set -x + sync + cmd make switch context=e2e_mdb_kind_ubi_cloudqa + cmd scripts/dev/prepare_local_e2e_run.sh + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + "${host_url}:~/mongodb-kubernetes/.multi_cluster_local_test_files" \ + ./ & + scp "${host_url}:~/.operator-dev/multicluster_kubeconfig" "${KUBE_CONFIG_PATH}" & + + wait +} + +get-kubeconfig() { + # For minikube, we need to get the kubeconfig and certificates + echo "Getting kubeconfig from minikube on s390x host..." + + # Create local minikube directory structure + mkdir -p "${HOME}/.minikube" + + # Copy certificates from remote host + echo "Copying minikube certificates..." + scp "${host_url}:~/.minikube/ca.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/client.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/client.key" "${HOME}/.minikube/" + + # Get kubeconfig and update paths to local ones + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; kubectl config view --raw" > "${kubeconfig_path}" + + # Update certificate paths to local paths + sed -i '' "s|/home/cloud-user/.minikube|${HOME}/.minikube|g" "${kubeconfig_path}" + + # Update server addresses to use localhost for tunneling + sed -i '' "s|https://192.168.[0-9]*.[0-9]*:\([0-9]*\)|https://127.0.0.1:\1|g" "${kubeconfig_path}" + + echo "Copied minikube kubeconfig and certificates to ${kubeconfig_path}" +} + +recreate-minikube-cluster() { + configure "$(uname -m)" 2>&1| prepend "minikube_host.sh configure" + echo "Recreating minikube cluster on ${S390_HOST_NAME} (${host_url})..." + # shellcheck disable=SC2088 + ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; minikube start --driver=podman --memory=8192mb --cpus=4" + echo "Copying kubeconfig to ${kubeconfig_path}" + get-kubeconfig +} + +tunnel() { + shift 1 + echo "Setting up tunnel for minikube cluster..." + + # Get the minikube API server port from remote host + local api_port + api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; minikube ip 2>/dev/null && echo ':8443' | tr -d '\n'") + + if [[ -z "${api_port}" ]]; then + echo "Could not determine minikube API server details. Is the cluster running?" 
+ return 1 + fi + + # Extract just the port (8443) + local port="8443" + echo "Forwarding localhost:${port} to minikube cluster API server" + + # Forward the API server port through minikube + set -x + # shellcheck disable=SC2029 + ssh -L "${port}:$(ssh -T -q "${host_url}" "minikube ip"):${port}" "${host_url}" "$@" + set +x +} + +retry_with_sleep() { + shift 1 + cmd=$1 + local sleep_time + sleep_time=5 + + while true; do + ${cmd} || true + echo "Retrying command after ${sleep_time} of sleep: ${cmd}" + sleep 5; + done +} + +ssh_to_host() { + shift 1 + # shellcheck disable=SC2029 + ssh "$@" "${host_url}" +} + +upload-my-ssh-private-key() { + ssh -T -q "${host_url}" "mkdir -p ~/.ssh" + scp "${HOME}/.ssh/id_rsa" "${host_url}:~/.ssh/id_rsa" + scp "${HOME}/.ssh/id_rsa.pub" "${host_url}:~/.ssh/id_rsa.pub" + ssh -T -q "${host_url}" "chmod 700 ~/.ssh && chown -R \$(whoami):\$(whoami) ~/.ssh" +} + +cmd() { + if [[ "$1" == "cmd" ]]; then + shift 1 + fi + + cmd="cd ~/mongodb-kubernetes; $*" + ssh -T -q "${host_url}" "${cmd}" +} + +usage() { + echo "USAGE: + minikube_host.sh + +PREREQUISITES: + - s390x host with SSH access + - define S390_HOST_NAME env var (e.g., export S390_HOST_NAME=user@hostname) + - SSH key-based authentication configured + +COMMANDS: + configure installs on a host: calls sync, switches context, installs necessary software (auto-detects arch) + sync rsync of project directory + recreate-minikube-cluster recreates minikube cluster and executes get-kubeconfig + remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host + get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/s390-host.kubeconfig + tunnel [args] creates ssh session with tunneling to all API servers + ssh [args] creates ssh session passing optional arguments to ssh + cmd [command with args] execute command as if being on s390x host + upload-my-ssh-private-key uploads your ssh keys (~/.ssh/id_rsa) to s390x host + help this message + +EXAMPLES: + export S390_HOST_NAME=user@ibmz8 + minikube_host.sh tunnel + minikube_host.sh cmd 'make e2e test=replica_set' +" +} + +case ${cmd} in +configure) configure "$@" ;; +recreate-minikube-cluster) recreate-minikube-cluster "$@" ;; +get-kubeconfig) get-kubeconfig ;; +remote-prepare-local-e2e-run) remote-prepare-local-e2e-run ;; +ssh) ssh_to_host "$@" ;; +tunnel) retry_with_sleep tunnel "$@" ;; +sync) sync ;; +cmd) cmd "$@" ;; +upload-my-ssh-private-key) upload-my-ssh-private-key ;; +help) usage ;; +*) usage ;; +esac diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh new file mode 100755 index 000000000..16f1c1c31 --- /dev/null +++ b/scripts/minikube/setup_minikube_host.sh @@ -0,0 +1,255 @@ +#!/usr/bin/env bash + +# this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) +source scripts/dev/set_env_context.sh + +set -Eeou pipefail + +set_limits() { + echo "Increasing fs.inotify.max_user_instances" + sudo sysctl -w fs.inotify.max_user_instances=8192 + + echo "Increasing fs.inotify.max_user_watches" + sudo sysctl -w fs.inotify.max_user_watches=10485760 + + echo "Increasing the number of open files" + nofile_max=$(cat /proc/sys/fs/nr_open) + nproc_max=$(ulimit -u) + sudo tee -a /etc/security/limits.conf <>> Setting up local registry and custom kicbase image for ppc64le..." 
+ + # Check if local registry is running (with fallback for namespace issues) + registry_running=false + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + echo "Registry detected via HTTP check (podman ps failed)" + registry_running=true + fi + + if ! $registry_running; then + echo "Starting local container registry on port 5000..." + + # Clean up any existing registry first + sudo podman rm -f registry 2>/dev/null || true + + if ! sudo podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2; then + echo "❌ Failed to start local registry - trying alternative approach" + exit 1 + fi + + # Wait for registry to be ready + echo "Waiting for registry to be ready..." + for i in {1..30}; do + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + break + fi + sleep 1 + done + else + echo "✅ Local registry already running" + fi + + # Configure podman to trust local registry (both user and root level for minikube) + echo "Configuring registries.conf to trust local registry..." + + # User-level config + mkdir -p ~/.config/containers + cat > ~/.config/containers/registries.conf << 'EOF' +[[registry]] +location = "localhost:5000" +insecure = true +EOF + + # Root-level config (since minikube uses sudo podman) + sudo mkdir -p /root/.config/containers + sudo tee /root/.config/containers/registries.conf << 'EOF' >/dev/null +[[registry]] +location = "localhost:5000" +insecure = true +EOF + + echo "✅ Registry configuration created for both user and root" + custom_image_tag="localhost:5000/kicbase:v0.0.47" + + # Determine image tag + custom_image_tag="localhost:5000/kicbase:v0.0.47" + if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then + echo "Custom kicbase image already exists in local registry" + return 0 + fi + + # Build custom kicbase image with crictl + echo "Building custom kicbase image with crictl for ppc64le..." + + # Build custom kicbase image + mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' +FROM gcr.io/k8s-minikube/kicbase:v0.0.47 +RUN if [ "$(uname -m)" = "ppc64le" ]; then \ + CRICTL_VERSION="v1.28.0" && \ + curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-ppc64le.tar.gz" \ + -o /tmp/crictl.tar.gz && \ + tar -C /usr/bin -xzf /tmp/crictl.tar.gz && \ + chmod +x /usr/bin/crictl && \ + rm /tmp/crictl.tar.gz; \ + fi +EOF + + cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + sudo podman build -t "${custom_image_tag}" . || { + echo "Failed to build custom image" + return 1 + } + sudo podman push "${custom_image_tag}" --tls-verify=false || { + echo "Failed to push to registry" + return 1 + } + cd - >/dev/null + echo "Custom kicbase image ready: ${custom_image_tag}" + fi + return 0 +} + +# Start minikube with podman driver +start_minikube_cluster() { + echo ">>> Starting minikube cluster with podman driver..." + + # Clean up any existing minikube state to avoid cached configuration issues + echo "Cleaning up any existing minikube state..." + if [[ -d ~/.minikube/machines/minikube ]]; then + echo "Removing ~/.minikube/machines/minikube directory..." + rm -rf ~/.minikube/machines/minikube + fi + + echo "Ensuring clean minikube state..." + "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true + + local start_args=("--driver=podman") + + if [[ "${ARCH}" == "ppc64le" ]]; then + echo "Using custom kicbase image for ppc64le with crictl..." 
+ + start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") + start_args+=("--insecure-registry=localhost:5000") + fi + + # Use default bridge CNI to avoid Docker Hub rate limiting issues + # start_args+=("--cni=bridge") + + echo "Starting minikube with args: ${start_args[*]}" + if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then + echo "✅ Minikube started successfully" + else + echo "❌ Minikube failed to start" + echo "Minikube logs:" + "${PROJECT_DIR:-.}/bin/minikube" logs | tail -20 + return 1 + fi +} + +setup_podman() { + echo "Setting up podman for ${ARCH}..." + + # Check if podman is already available + if command -v podman &> /dev/null; then + echo "✅ Podman already installed" + + # Diagnose podman state + echo "=== Podman Diagnostics ===" + echo "User: $(whoami), UID: $(id -u)" + echo "User namespace support: $(cat /proc/self/uid_map 2>/dev/null || echo 'not available')" + echo "Systemctl user status:" + systemctl --user status podman.socket 2>/dev/null || echo "podman.socket not active" + echo "Running 'sudo podman info' command..." + sudo podman info 2>&1 + fi + + + # Configure podman to use cgroupfs instead of systemd in CI + mkdir -p ~/.config/containers + cat > ~/.config/containers/containers.conf << EOF +[containers] +cgroup_manager = "cgroupfs" +events_logger = "file" + +[engine] +cgroup_manager = "cgroupfs" +EOF + +} + +# Setup podman and container runtime +setup_podman +set_limits +download_minikube + +# Setup local registry and custom kicbase image for ppc64le if needed +setup_local_registry_and_custom_image + +echo "" +echo ">>> Verifying minikube installation..." +if command -v minikube &> /dev/null; then + minikube_version=$(minikube version --short 2>/dev/null || minikube version 2>/dev/null | head -n1) + echo "✅ Minikube installed successfully: ${minikube_version}" +else + echo "❌ Minikube installation failed - minikube command not found" + echo "Please check the installation logs above for errors" + exit 1 +fi + +if [[ "${ARCH}" == "ppc64le" ]]; then + echo "" + echo ">>> Note: crictl will be patched into the minikube container after startup" +else + echo "" + echo ">>> Using standard kicbase image (crictl included for x86_64/aarch64/s390x)" +fi + +# Start the minikube cluster +start_minikube_cluster + +# Update kubectl context to point to the running cluster +echo "" +echo ">>> Updating kubectl context for minikube cluster..." +"${PROJECT_DIR:-.}/bin/minikube" update-context +echo "✅ Kubectl context updated successfully" + +echo "Minikube host setup completed successfully for ${ARCH}!" + +# Final status +echo "" +echo "==========================================" +echo "✅ Setup Summary" +echo "==========================================" +echo "Architecture: ${ARCH}" +echo "Container Runtime: podman" +echo "Minikube Driver: podman" +echo "Minikube: Default cluster" +echo "Minikube: ${minikube_version}" +echo "CNI: bridge (default)" +if [[ "${ARCH}" == "ppc64le" ]]; then + echo "Special Config: Custom kicbase image with crictl via local registry" +fi
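Once setup_minikube_host.sh has finished, the podman-driven cluster can be smoke-checked before the e2e_smoke_ibm task group takes over. A minimal verification sketch, assuming it is run from the repository root on the freshly prepared host; none of these commands are added by this change:

# Sketch only: post-setup smoke check for the minikube/podman environment.
set -Eeou pipefail

bin="${PROJECT_DIR:-.}/bin"

"${bin}/minikube" status                  # host, kubelet and apiserver should be Running
"${bin}/kubectl" get nodes -o wide        # single node whose architecture matches $(uname -m)
"${bin}/kubectl" auth can-i create pods --namespace default

# On ppc64le the custom kicbase image is served from the local registry started above.
if [[ "$(uname -m)" == "ppc64le" ]]; then
  curl -s http://localhost:5000/v2/kicbase/tags/list   # expect "v0.0.47" in the tag list
fi

# Pull-secret material lives in root's auth.json because minikube drives podman via sudo.
sudo "${bin}/jq" -r '.auths | keys[]' /root/.config/containers/auth.json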