diff --git a/.github/workflows/integration-tests-core.yml b/.github/workflows/integration-tests-core.yml index a2fdff277e2..ee33e4fb379 100644 --- a/.github/workflows/integration-tests-core.yml +++ b/.github/workflows/integration-tests-core.yml @@ -50,12 +50,6 @@ jobs: with: repository: 'firebolt-db/firebolt-python-sdk' - - name: Setup Firebolt Core - id: setup-core - uses: firebolt-db/action-setup-core@eabcd701de0be41793fda0655d29d46c70c847c2 # main - with: - tag_version: ${{ inputs.tag_version || vars.DEFAULT_CORE_IMAGE_TAG }} - - name: Set up Python uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: @@ -66,31 +60,26 @@ jobs: python -m pip install --upgrade pip pip install ".[dev]" - - name: Run integration tests HTTP + - name: "Compose: Run integration tests HTTP" env: SERVICE_ID: ${{ secrets.FIREBOLT_CLIENT_ID_STG_NEW_IDN }} SERVICE_SECRET: ${{ secrets.FIREBOLT_CLIENT_SECRET_STG_NEW_IDN }} - DATABASE_NAME: "firebolt" - ENGINE_NAME: "" - STOPPED_ENGINE_NAME: "" - API_ENDPOINT: "" - ACCOUNT_NAME: "" - CORE_URL: ${{ steps.setup-core.outputs.service_url }} run: | - pytest -o log_cli=true -o log_cli_level=WARNING tests/integration -k "core" --alluredir=allure-results/ + pytest -o log_cli=true -o log_cli_level=WARNING tests/integration -k "core" --run-compose --alluredir=allure-results/ - - name: Run integration tests HTTPS + - name: "Compose: Run integration tests HTTPS" env: SERVICE_ID: ${{ secrets.FIREBOLT_CLIENT_ID_STG_NEW_IDN }} SERVICE_SECRET: ${{ secrets.FIREBOLT_CLIENT_SECRET_STG_NEW_IDN }} - DATABASE_NAME: "firebolt" - ENGINE_NAME: "" - STOPPED_ENGINE_NAME: "" - API_ENDPOINT: "" - ACCOUNT_NAME: "" - CORE_URL: ${{ steps.setup-core.outputs.service_https_url }} run: | - pytest -o log_cli=true -o log_cli_level=WARNING tests/integration -k "core" --alluredir=allure-results-https/ + pytest -o log_cli=true -o log_cli_level=WARNING tests/integration -k "core" --run-https --run-compose --alluredir=allure-results-https/ + + - name: 
"Kind: Run integration tests HTTP" + env: + HELM_CHART_VERSION: "0.3.0" + CORE_IMAGE_TAG: ${{ inputs.tag_version || vars.DEFAULT_CORE_IMAGE_TAG }} + run: | + pytest -o log_cli=true -o log_cli_level=WARNING tests/integration -k "core" --run-kind --alluredir=allure-results-kind-http/ # Need to pull the pages branch in order to fetch the previous runs - name: Get Allure history @@ -101,24 +90,39 @@ jobs: ref: gh-pages path: gh-pages - - name: Allure Report - uses: firebolt-db/action-allure-report@781b4529b67b4f393c63d7dc1e098cb558e1ab16 # v1.4.1 + - name: Install Allure CLI + id: install-allure if: always() - continue-on-error: true - with: - github-key: ${{ secrets.GITHUB_TOKEN }} - test-type: core - allure-dir: allure-results - pages-branch: gh-pages - repository-name: python-sdk + env: + ALLURE_VERSION: "2.38.1" + run: | + npm install -g allure-commandline@${ALLURE_VERSION} - - name: Allure Report HTTPS - uses: firebolt-db/action-allure-report@781b4529b67b4f393c63d7dc1e098cb558e1ab16 # v1.4.1 - if: always() - continue-on-error: true + - name: Generate All Reports Locally + id: generate-reports + if: always() && steps.install-allure.outcome == 'success' + run: | + REPORT_BASE="allure-final/allure/python-sdk_${{ github.sha }}" + allure generate allure-results -o "${REPORT_BASE}_core" + allure generate allure-results-https -o "${REPORT_BASE}_core_https" + allure generate allure-results-kind-http -o "${REPORT_BASE}_core_kind" + + - name: Deploy all reports to GitHub Pages + uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0 + id: deploy-reports + if: always() && steps.generate-reports.outcome == 'success' with: - github-key: ${{ secrets.GITHUB_TOKEN }} - test-type: core_https - allure-dir: allure-results-https - pages-branch: gh-pages - repository-name: python-sdk + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_branch: gh-pages + publish_dir: ./allure-final + keep_files: true + + - name: Set Job Summary + if: always() && steps.deploy-reports.outcome 
== 'success' + env: + BASE_URL: "https://python.docs.firebolt.io/allure/python-sdk_${{ github.sha }}" + run: | + echo "### Test Reports" >> $GITHUB_STEP_SUMMARY + echo "* [Core HTTP Report](${BASE_URL}_core)" >> $GITHUB_STEP_SUMMARY + echo "* [HTTPS Report](${BASE_URL}_core_https)" >> $GITHUB_STEP_SUMMARY + echo "* [Kind HTTP Report](${BASE_URL}_core_kind)" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index 56180e4eb09..8220a1c2f2c 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -11,11 +11,10 @@ jobs: uses: ./.github/workflows/unit-tests.yml secrets: GIST_PAT: ${{ secrets.GIST_PAT }} - + security-scan: needs: [unit-tests] uses: ./.github/workflows/security-scan.yml secrets: FOSSA_TOKEN: ${{ secrets.FOSSA_TOKEN }} SONARCLOUD_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} - diff --git a/CONTRIBUTING.MD b/CONTRIBUTING.MD index 76212877b1d..f4c0c8ece45 100644 --- a/CONTRIBUTING.MD +++ b/CONTRIBUTING.MD @@ -16,10 +16,10 @@ Optionally setup PyCharm linting shortcut: Name: lint Description: Format the current file Program: $PyInterpreterDirectory$/pre-commit -Arguments: run --files=$FilePath$Working +Arguments: run --files=$FilePath$Working Working Directory: $ProjectFileDir$ ``` -2. Preferences -> Keymap -> External Tools -> lint, +2. Preferences -> Keymap -> External Tools -> lint, Assign the keyboard shortcut `Option-cmd-l` ### Before Committing @@ -28,8 +28,8 @@ Working Directory: $ProjectFileDir$ 2. run `mypy src` to check for type errors 3. run `pytest tests/unit` to run unit tests -Note: while there is a `mypy` hook for pre-commit, -I found it too buggy to be worthwhile, so I just run mypy manually. +Note: while there is a `mypy` hook for pre-commit, +I found it too buggy to be worthwhile, so I just run mypy manually. ### PR procedures @@ -44,13 +44,35 @@ I found it too buggy to be worthwhile, so I just run mypy manually. 4. 
If the integration tests pass and the change looks good to the maintainer they approve it. 5. Merge into the main branch. Only the maintainers have the ability to merge a PR. They will do so at the earliest convenience, with regards to the impact of the change as well as the release planning. +### Integration Tests +The integration test suite runs against Firebolt SaaS v1, Firebolt SaaS v2 and Firebolt Core. The SaaS test suites only run as part of the CI. Locally you can run the Firebolt Core test suite. + +#### Run the tests locally + +**Run on Kind:** +You must have [kind](https://kind.sigs.k8s.io/) installed. The test driver will create a kind cluster and install the Firebolt Core Helm chart. + +```shell +pytest -k "core" --run-kind tests/integration +``` + +**Run on Docker Compose:** +```shell +pytest -k "core" --run-compose tests/integration +``` + +### Include a test in the core suite +Not all integration tests are supported by Firebolt Core. + +The tests that are supported, use `@fixture(params=["core"])` on the fixture. Other ways are to use a `core` mark or add `core` in the name of the test. For more details on how tests are selected with the `pytest -k` flag, please check the pytest documentation. + ### Docstrings -Use the Google format for docstrings. Do not include types or an indication -of "optional" in docstrings. Those should be captured in the function signature +Use the Google format for docstrings. Do not include types or an indication +of "optional" in docstrings. Those should be captured in the function signature as type annotations; no need to repeat them in the docstring. -Public methods and functions should have docstrings. +Public methods and functions should have docstrings. One-liners are fine for simple methods and functions. For PyCharm Users: @@ -73,20 +95,20 @@ In general, organize class internals in this order: * alternative constructors first * other classmethods next 4. properties (`@property`) -5. remaining methods +5. 
remaining methods * put more important / broadly applicable functions first * group related functions together to minimize scrolling -Read more about this philosophy +Read more about this philosophy [here](https://softwareengineering.stackexchange.com/a/199317). ### Huge classes -If classes start to approach 1k lines, consider breaking them into parts, +If classes start to approach 1k lines, consider breaking them into parts, possibly like [this](https://stackoverflow.com/a/47562412). ### Versioning -Consider adopting: +Consider adopting: * https://packboard.atlassian.net/wiki/x/AYC6aQ * https://python-semantic-release.readthedocs.io/en/latest/ diff --git a/kind.yaml b/kind.yaml new file mode 100644 index 00000000000..ab4a941b308 --- /dev/null +++ b/kind.yaml @@ -0,0 +1,7 @@ +--- +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + - role: worker + - role: worker diff --git a/tests/integration/cluster/__init__.py b/tests/integration/cluster/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/cluster/base.py b/tests/integration/cluster/base.py new file mode 100644 index 00000000000..e125e8ecf65 --- /dev/null +++ b/tests/integration/cluster/base.py @@ -0,0 +1,20 @@ +import socket +from abc import ABC, abstractmethod + + +class AppManager(ABC): + @abstractmethod + def deploy(self, params: dict = None) -> dict: + """Deploy the application environment.""" + + @abstractmethod + def cleanup(self, setup_data: dict) -> None: + """Clean up the application environment.""" + + +def get_free_port(): + """Ask the OS for a free ephemeral port.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] diff --git a/tests/integration/cluster/compose.py b/tests/integration/cluster/compose.py new file mode 100644 index 00000000000..db88cd33c1b --- /dev/null +++ b/tests/integration/cluster/compose.py 
@@ -0,0 +1,218 @@ +import json +import os +import shutil +import socket +import subprocess +import tempfile +import uuid +from os import getenv +from time import sleep + +import yaml + +from tests.integration.cluster.base import AppManager, get_free_port + +NGINX_CONFIG_TEMPLATE = """ +{upstream_block} +server {{ + listen 443 ssl; + server_name localhost 127.0.0.1; + + ssl_certificate /etc/nginx/certs/server.pem; + ssl_certificate_key /etc/nginx/certs/server.key; + + location / {{ + proxy_pass http://{proxy_target}; + proxy_set_header Host $host; + }} +}} +""" + + +def generate_self_signed_cert(cert_path: str, key_path: str) -> None: + """Generate a self-signed certificate for localhost using openssl.""" + os.makedirs(os.path.dirname(cert_path), exist_ok=True) + subprocess.run( + [ + "openssl", + "req", + "-x509", + "-newkey", + "rsa:4096", + "-keyout", + key_path, + "-out", + cert_path, + "-days", + "365", + "-nodes", + "-subj", + "/CN=localhost", + "-addext", + "subjectAltName = DNS:localhost, IP:127.0.0.1", + ], + check=True, + capture_output=True, + ) + + +class ComposeAppManager(AppManager): + def deploy(self, params=None): + return deploy_compose(params) + + def cleanup(self, setup_data): + cleanup_compose(setup_data) + + +def deploy_compose(params=None): + """Deploy Firebolt Core using Docker Compose.""" + if params is None: + params = {} + + nodes_count = int(params.get("nodesCount", 1)) + image_tag = params.get("image.tag", getenv("CORE_IMAGE_TAG", "preview-rc")) + + test_id = ( + f"{os.environ.get('PYTEST_XDIST_WORKER', 'python-sdk')}-{uuid.uuid4().hex[:4]}" + ) + project_name = f"core-{test_id}" + + tmp_dir = tempfile.mkdtemp(prefix=f"firebolt-compose-{test_id}") + resources_dir = os.path.join(tmp_dir, "resources") + certs_dir = os.path.join(resources_dir, "certs") + os.makedirs(certs_dir, exist_ok=True) + + # Generate certs + server_cert_path = os.path.join(certs_dir, "server.pem") + server_key_path = os.path.join(certs_dir, "server.key") + 
generate_self_signed_cert(server_cert_path, server_key_path) + + # Generate nodes + node_names = [f"firebolt-core-{i}" for i in range(nodes_count)] + + # Generate core config + core_config = {"nodes": [{"host": name} for name in node_names]} + with open(os.path.join(resources_dir, "config.json"), "w") as f: + json.dump(core_config, f, indent=2) + + # Generate nginx config + if nodes_count > 1: + upstream_servers = "".join([f"server {name}:3473; " for name in node_names]) + upstream_block = f"upstream firebolt {{ {upstream_servers}}}" + proxy_target = "firebolt" # Upstream name, no port needed + else: + # Single node, no upstream block needed + upstream_block = "" + proxy_target = f"{node_names[0]}:3473" + + nginx_config = NGINX_CONFIG_TEMPLATE.format( + upstream_block=upstream_block, proxy_target=proxy_target + ) + with open(os.path.join(resources_dir, "default.conf"), "w") as f: + f.write(nginx_config) + + # Generate docker-compose.yaml + node_ports = [] + services = {} + + for i, name in enumerate(node_names): + node_port = get_free_port() + node_ports.append(node_port) + services[name] = { + "image": f"ghcr.io/firebolt-db/firebolt-core:{image_tag}", + "container_name": f"{project_name}-{name}", + "command": f"--node {i}", + "privileged": True, + "restart": "no", + "ulimits": {"memlock": 8589934592}, + "ports": [f"{node_port}:3473"], + "volumes": [ + "./resources/config.json:/firebolt-core/config.json:ro", + f"./{name}:/firebolt-core/data", + ], + } + # Create data dir + os.makedirs(os.path.join(tmp_dir, name), exist_ok=True) + + # Create one nginx instance per core node, all load balancing + nginx_ports = [] + for i in range(nodes_count): + nginx_port = get_free_port() + nginx_ports.append(nginx_port) + services[f"nginx-{i}"] = { + "image": "nginx:alpine", + "container_name": f"{project_name}-nginx-{i}", + "ports": [f"{nginx_port}:443"], + "volumes": [ + "./resources/certs:/etc/nginx/certs:ro", + "./resources/default.conf:/etc/nginx/conf.d/default.conf:ro", + ], 
+ "depends_on": node_names, + } + + compose_data = {"services": services} + with open(os.path.join(tmp_dir, "docker-compose.yaml"), "w") as f: + yaml.dump(compose_data, f, default_flow_style=False) + + print(f"[Compose] Starting project {project_name} in {tmp_dir}...") + subprocess.run( + ["docker", "compose", "-p", project_name, "up", "-d"], + cwd=tmp_dir, + check=True, + capture_output=True, + ) + + # Wait for core to be healthy + print(f"[Compose] Waiting for cluster {project_name} to be healthy...") + for i in range(30): + try: + # Try to connect to the core port directly + with socket.create_connection(("127.0.0.1", node_ports[0]), timeout=1): + res = subprocess.run( + [ + "curl", + "-s", + "-o", + "/dev/null", + "-w", + "%{http_code}", + f"http://127.0.0.1:{node_ports[0]}", + ], + capture_output=True, + text=True, + ) + if res.stdout.strip() == "200": + print(f"[Compose] Cluster is healthy at 127.0.0.1:{node_ports[0]}") + break + except (socket.error, ConnectionRefusedError): + pass + + if i == 29: + raise RuntimeError( + f"Cluster {project_name} failed to become healthy at {node_ports[0]}" + ) + sleep(2) + + ips = [f"127.0.0.1:{port}" for port in node_ports] + url = f"http://127.0.0.1:{node_ports[0]}" + + return { + "url": url, + "project_name": project_name, + "tmp_dir": tmp_dir, + "ips": ips, + "nginx_ports": nginx_ports, # list of ports + "server_cert_path": server_cert_path, + } + + +def cleanup_compose(setup_data): + """Stop and clean up Docker Compose project.""" + print(f"[Compose] Stopping project {setup_data['project_name']}...") + subprocess.run( + ["docker", "compose", "-p", setup_data["project_name"], "down", "-v"], + cwd=setup_data["tmp_dir"], + check=True, + capture_output=True, + ) + shutil.rmtree(setup_data["tmp_dir"]) diff --git a/tests/integration/cluster/helm.py b/tests/integration/cluster/helm.py new file mode 100644 index 00000000000..3aa654f2fb9 --- /dev/null +++ b/tests/integration/cluster/helm.py @@ -0,0 +1,178 @@ +import os +import 
socket +import subprocess +import uuid +from os import getenv +from time import sleep + +from tests.integration.cluster.base import AppManager, get_free_port + +CORE_HELM_CHART_VERSION_ENV = "CORE_HELM_CHART_VERSION" +CORE_DEFAULT_HELM_CHART_VERSION = "0.3.0" +CORE_IMAGE_TAG_ENV = "CORE_IMAGE_TAG" +CORE_PORT = 3473 + + +class HelmAppManager(AppManager): + def __init__(self, k8s_cluster): + self.k8s_cluster = k8s_cluster + + def deploy(self, params=None): + return deploy_helm_chart(self.k8s_cluster, helm_values=params) + + def cleanup(self, setup_data): + cleanup_helm_chart(self.k8s_cluster, setup_data) + + +def deploy_helm_chart(k8s_cluster, helm_values=None): + """Common logic for both session and function-scoped setups.""" + test_id = ( + f"{os.environ.get('PYTEST_XDIST_WORKER', 'python-sdk')}-{uuid.uuid4().hex[:4]}" + ) + release, ns = f"core-{test_id}", f"ns-{test_id}" + local_port = get_free_port() + + # Use CORE_IMAGE_TAG if not provided in helm_values + if helm_values is None: + helm_values = {} + if "image.tag" not in helm_values: + core_image_tag = getenv(CORE_IMAGE_TAG_ENV) + if core_image_tag: + helm_values["image.tag"] = core_image_tag + + set_args = [] + if helm_values: + for key, value in helm_values.items(): + set_args.extend(["--set", f"{key}={value}"]) + + print(f"[Kind] Installing Helm release {release} into namespace {ns}...") + subprocess.run( + [ + "helm", + "install", + release, + "oci://ghcr.io/firebolt-db/helm-charts/firebolt-core", + "--version", + getenv(CORE_HELM_CHART_VERSION_ENV, CORE_DEFAULT_HELM_CHART_VERSION), + "-n", + ns, + "--create-namespace", + "--wait", + "--kube-context", + k8s_cluster, + ] + + set_args, + check=True, + ) + + print(f"[Kind] Waiting for pods in {ns} to be ready...") + subprocess.run( + [ + "kubectl", + "wait", + "--for=condition=ready", + "pod", + "-l", + "app.kubernetes.io/instance=" + release, + "--namespace", + ns, + "--timeout=120s", + "--context", + k8s_cluster, + ], + check=True, + ) + + pod_names_result 
= subprocess.run( + [ + "kubectl", + "get", + "pods", + "-l", + "app.kubernetes.io/instance=" + release, + "-n", + ns, + "-o", + "jsonpath={.items[*].metadata.name}", + "--context", + k8s_cluster, + ], + capture_output=True, + text=True, + check=True, + ) + pod_names = pod_names_result.stdout.split() + + pf_procs = [] + ips_with_ports = [] + for i, pod_name in enumerate(pod_names): + ip = "127.0.0.1" + port = local_port + i + ips_with_ports.append(f"{ip}:{port}") + print(f"[Kind] Port-forward to pod {pod_name} on {ip}:{port}->{CORE_PORT}...") + pf_proc = subprocess.Popen( + [ + "kubectl", + "port-forward", + "--address", + ip, + f"pod/{pod_name}", + f"{port}:{CORE_PORT}", + "-n", + ns, + "--context", + k8s_cluster, + ] + ) + pf_procs.append(pf_proc) + + sleep(1) + # Wait for port-forward + for i in range(10): + # NOTE(review): socket errors are handled per-probe below, so an outer + # try/except here would be dead code and would make the raise unreachable. + all_up = True + for ip_port in ips_with_ports: + ip, port = ip_port.split(":") + try: + with socket.create_connection((ip, int(port)), timeout=1): + print(f"[Kind] Port-forward on {ip}:{port} is UP") + except (socket.error, ConnectionRefusedError): + all_up = False + break + if all_up: + break + # Fail loudly on the last attempt instead of proceeding with a dead forward + if i == 9: + raise RuntimeError(f"Failed to connect to port-forward on {local_port}") + sleep(1) + + url = f"http://127.0.0.1:{local_port}" + + # Return everything needed for cleanup + return { + "url": url, + "processes": pf_procs, + "release": release, + "ns": ns, + "ips": ips_with_ports, + } + + +def cleanup_helm_chart(k8s_cluster, setup_data): + """Common teardown logic.""" + for proc in setup_data["processes"]: + proc.terminate() + subprocess.run( + [ + "helm", + "uninstall", + setup_data["release"], + "-n", + setup_data["ns"], + "--kube-context", + k8s_cluster, + ], + check=True, + ) + subprocess.run(["kubectl", "delete", "ns", setup_data["ns"]], check=True) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 3894fce433a..0b3e53601f0 100644 --- 
a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,3 +1,5 @@ +import os +import subprocess from logging import getLogger from os import environ, getenv from time import time @@ -8,6 +10,8 @@ from firebolt.client.auth import ClientCredentials from firebolt.client.auth.firebolt_core import FireboltCore from firebolt.client.auth.username_password import UsernamePassword +from tests.integration.cluster.compose import ComposeAppManager +from tests.integration.cluster.helm import HelmAppManager LOGGER = getLogger(__name__) @@ -24,18 +28,34 @@ STOPPED_ENGINE_URL_ENV = "STOPPED_ENGINE_URL" CORE_URL_ENV = "CORE_URL" -# https://docs.pytest.org/en/latest/example/simple.html#control-skipping-of-tests-according-to-command-line-option -# Adding slow marker to tests +KIND_CLUSTER_NAME = "firebolt-python-sdk" def pytest_addoption(parser): parser.addoption( "--runslow", action="store_true", default=False, help="run slow tests" ) + parser.addoption( + "--run-kind", action="store_true", help="Run integration tests against kind" + ) + parser.addoption( + "--run-compose", + action="store_true", + help="Run integration tests against docker-compose", + ) + parser.addoption( + "--run-https", + action="store_true", + help="Run integration tests against https endpoint", + ) def pytest_configure(config): config.addinivalue_line("markers", "slow: mark test as slow to run") + config.addinivalue_line( + "markers", + "dedicated_core_cluster: Marker for tests that need a dedicated core cluster installation", + ) def pytest_collection_modifyitems(config, items): @@ -72,88 +92,88 @@ def must_env(var_name: str) -> str: return environ[var_name] -@fixture(scope="session") -def engine_name() -> str: +@fixture(scope="function") +def engine_name(app_setup) -> str: return must_env(ENGINE_NAME_ENV) -@fixture(scope="session") -def stopped_engine_name() -> str: +@fixture(scope="function") +def stopped_engine_name(app_setup) -> str: return must_env(STOPPED_ENGINE_NAME_ENV) 
-@fixture(scope="session") -def database_name() -> str: +@fixture(scope="function") +def database_name(app_setup) -> str: return must_env(DATABASE_NAME_ENV) -@fixture(scope="session") -def use_db_name(database_name: str): +@fixture(scope="function") +def use_db_name(app_setup, database_name: str): return f"{database_name}_use_db_test" -@fixture(scope="session") -def account_name() -> str: +@fixture(scope="function") +def account_name(app_setup) -> str: return must_env(ACCOUNT_NAME_ENV) -@fixture(scope="session") -def invalid_account_name(account_name: str) -> str: +@fixture(scope="function") +def invalid_account_name(app_setup, account_name: str) -> str: return f"{account_name}--" -@fixture(scope="session") -def api_endpoint() -> str: +@fixture(scope="function") +def api_endpoint(app_setup) -> str: return must_env(API_ENDPOINT_ENV) -@fixture(scope="session") -def service_id() -> str: +@fixture(scope="function") +def service_id(app_setup) -> str: return must_env(SERVICE_ID_ENV) -@fixture(scope="session") -def service_secret() -> Secret: +@fixture(scope="function") +def service_secret(app_setup) -> Secret: return Secret(must_env(SERVICE_SECRET_ENV)) -@fixture(scope="session") -def auth(service_id: str, service_secret: Secret) -> ClientCredentials: +@fixture(scope="function") +def auth(app_setup, service_id: str, service_secret: Secret) -> ClientCredentials: return ClientCredentials(service_id, service_secret.value) -@fixture(scope="session") -def core_auth() -> FireboltCore: +@fixture(scope="function") +def core_auth(app_setup) -> FireboltCore: return FireboltCore() -@fixture(scope="session") -def username() -> str: +@fixture(scope="function") +def username(app_setup) -> str: return must_env(USER_NAME_ENV) -@fixture(scope="session") -def password() -> str: +@fixture(scope="function") +def password(app_setup) -> Secret: return Secret(must_env(PASSWORD_ENV)) -@fixture(scope="session") -def password_auth(username: str, password: Secret) -> UsernamePassword: 
+@fixture(scope="function") +def password_auth(app_setup, username: str, password: Secret) -> UsernamePassword: return UsernamePassword(username, password.value) -@fixture(scope="session") -def engine_url() -> str: +@fixture(scope="function") +def engine_url(app_setup) -> str: return must_env(ENGINE_URL_ENV) -@fixture(scope="session") -def stopped_engine_url() -> str: +@fixture(scope="function") +def stopped_engine_url(app_setup) -> str: return must_env(STOPPED_ENGINE_URL_ENV) -@fixture(scope="session") -def core_url() -> str: +@fixture(scope="function") +def core_url(app_setup) -> str: return getenv(CORE_URL_ENV, "") @@ -172,3 +192,214 @@ def setter(value): assert ( end - start >= limit ), f"Test took {end - start} seconds, less than {limit} seconds" + + +def pytest_generate_tests(metafunc): + if "app_setup" in metafunc.fixturenames: + run_kind = metafunc.config.getoption("--run-kind") + run_compose = metafunc.config.getoption("--run-compose") + run_https = metafunc.config.getoption("--run-https") + + if run_kind and run_https: + raise ValueError( + "The --run-kind and --run-https arguments are not compatible. HTTPS is only supported with --run-compose." + ) + + backends = [] + + if run_compose: + backends.append("docker-compose") + if run_kind: + backends.append("kind") + + # Apply the parametrization + if backends: + metafunc.parametrize("app_setup", backends, indirect=True) + + +@fixture(scope="session") +def kind_cluster(request): + """ + Creates a kind cluster once per session using the provided kind.yaml. + Only if the --run-kind flag is provided. 
+ """ + run_kind = request.config.getoption("--run-kind") + + if not run_kind: + # If we aren't running kind tests, don't do the heavy setup + yield None + return + + # Check if cluster already exists + result = subprocess.run(["kind", "get", "clusters"], capture_output=True, text=True) + clusters = result.stdout.splitlines() + + if KIND_CLUSTER_NAME in clusters: + print( + f"\n[Kind] Cluster '{KIND_CLUSTER_NAME}' already exists. Skipping creation." + ) + else: + print(f"\n[Kind] Creating cluster '{KIND_CLUSTER_NAME}'...") + subprocess.run( + [ + "kind", + "create", + "cluster", + "--name", + KIND_CLUSTER_NAME, + "--config", + "kind.yaml", + "--wait", + "5m", + ], + check=True, + ) + + context = f"kind-{KIND_CLUSTER_NAME}" + yield context + + # Optional: Only delete when running in CI + if os.getenv("GITHUB_ACTIONS") == "true": + print(f"\n[Kind] CI detected: Deleting cluster '{KIND_CLUSTER_NAME}'...") + subprocess.run( + ["kind", "delete", "cluster", "--name", KIND_CLUSTER_NAME], check=True + ) + else: + print( + f"\n[Kind] Local: Cluster '{KIND_CLUSTER_NAME}' will be kept for next session." 
+ ) + + +@fixture(scope="session") +def session_helm_install(request, kind_cluster): + """The fast, shared Kind/Helm deployment.""" + if not request.config.getoption("--run-kind"): + yield None + return + + manager = HelmAppManager(kind_cluster) + data = manager.deploy() + yield data + manager.cleanup(data) + + +@fixture(scope="session") +def session_compose_install(request): + """The fast, shared Docker Compose deployment.""" + run_kind = request.config.getoption("--run-kind") + run_compose = request.config.getoption("--run-compose") + + # Only run if --run-compose is explicitly requested + if not run_compose: + yield None + return + + manager = ComposeAppManager() + data = manager.deploy() + yield data + manager.cleanup(data) + + +@fixture(scope="function") +def dedicated_helm_install(request, kind_cluster): + """Create a dedicated Kind/Helm installation.""" + marker = request.node.get_closest_marker("dedicated_core_cluster") + if marker and getattr(request, "param", "docker-compose") == "kind": + # Prefer marker args if present, else fallback to indirect param + params = marker.args[0] if marker.args else getattr(request, "param", {}) + manager = HelmAppManager(kind_cluster) + data = manager.deploy(params=params) + yield data + manager.cleanup(data) + else: + yield None + + +@fixture(scope="function") +def dedicated_compose_install(request): + """Create a dedicated Docker Compose installation.""" + marker = request.node.get_closest_marker("dedicated_core_cluster") + if marker and getattr(request, "param", "docker-compose") == "docker-compose": + # Prefer marker args if present, else fallback to indirect param + params = marker.args[0] if marker.args else getattr(request, "param", {}) + manager = ComposeAppManager() + data = manager.deploy(params=params) + yield data + manager.cleanup(data) + else: + yield None + + +@fixture(scope="function") +def app_setup( + request, + session_helm_install, + session_compose_install, + dedicated_helm_install, + 
dedicated_compose_install, +): + """ + Dynamically injects the required environment variables from active setup. + """ + backend = getattr(request, "param", "remote") + run_https = request.config.getoption("--run-https") + + if backend == "kind": + active_setup = dedicated_helm_install or session_helm_install + elif backend == "docker-compose": + active_setup = dedicated_compose_install or session_compose_install + else: + active_setup = None + + if active_setup: + if active_setup in (dedicated_helm_install, dedicated_compose_install): + print(f"\n[DEBUG] Using DEDICATED install at {active_setup['url']}") + else: + print(f"\n[DEBUG] Using SESSION install at {active_setup['url']}") + + if active_setup: + url = active_setup["url"] + ips = active_setup.get("ips", ["127.0.0.1"]) + env_vars = { + "CORE_URL": url, + "DATABASE_NAME": "firebolt", + "ENGINE_NAME": "", + "STOPPED_ENGINE_NAME": "", + "API_ENDPOINT": "", + "ACCOUNT_NAME": "", + "SERVICE_ID": "", + "SERVICE_SECRET": "", + "ENGINE_URL": "", + "CORE_IPS": ",".join(ips), + } + + if run_https and backend == "docker-compose": + # Override url to use https if provided + nginx_ports = active_setup.get("nginx_ports", []) + if not nginx_ports: + raise ValueError("HTTPS is requested, but no nginx ports available.") + + https_ips = [f"https://127.0.0.1:{port}" for port in nginx_ports] + env_vars["CORE_URL"] = https_ips[0] + env_vars["CORE_IPS"] = ",".join(https_ips) + + # Set SSL cert file for https requests + cert_path = active_setup.get("server_cert_path") + if not cert_path: + raise ValueError("HTTPS is requested, but no cert path available.") + env_vars["SSL_CERT_FILE"] = cert_path + + old_env = {k: os.environ.get(k) for k in env_vars} + os.environ.update(env_vars) + + yield backend + + # Restore env + for k, v in old_env.items(): + if v is None: + os.environ.pop(k, None) + else: + os.environ[k] = v + else: + # Fallback if no setup is active (e.g. 
external compose or remote) + yield backend diff --git a/tests/integration/dbapi/async/V2/test_dedicated_compose_async.py b/tests/integration/dbapi/async/V2/test_dedicated_compose_async.py new file mode 100644 index 00000000000..94eabfb8e9c --- /dev/null +++ b/tests/integration/dbapi/async/V2/test_dedicated_compose_async.py @@ -0,0 +1,21 @@ +from pytest import mark + +from firebolt.async_db import Connection + + +@mark.dedicated_core_cluster +@mark.parametrize("connection_factory", ["core"], indirect=True) +async def test_dedicated_compose_default(app_setup, connection: Connection): + """Verify that a dedicated cluster can be brought up with default settings.""" + async with connection.cursor() as c: + await c.execute("SELECT 1") + assert await c.fetchone() == [1] + + +@mark.dedicated_core_cluster({"nodesCount": 2}) +@mark.parametrize("connection_factory", ["core"], indirect=True) +async def test_dedicated_compose_multi_node(app_setup, connection: Connection): + """Verify that a dedicated cluster can be brought up with 2 nodes.""" + async with connection.cursor() as c: + await c.execute("SELECT 1") + assert await c.fetchone() == [1] diff --git a/tests/integration/dbapi/sync/V2/test_dedicated_compose.py b/tests/integration/dbapi/sync/V2/test_dedicated_compose.py new file mode 100644 index 00000000000..143ce460506 --- /dev/null +++ b/tests/integration/dbapi/sync/V2/test_dedicated_compose.py @@ -0,0 +1,21 @@ +from pytest import mark + +from firebolt.db import Connection + + +@mark.dedicated_core_cluster +@mark.parametrize("connection_factory", ["core"], indirect=True) +def test_dedicated_compose_default(app_setup, connection: Connection): + """Verify that a dedicated cluster can be brought up with default settings.""" + with connection.cursor() as c: + c.execute("SELECT 1") + assert c.fetchone() == [1] + + +@mark.dedicated_core_cluster({"nodesCount": 2}) +@mark.parametrize("connection_factory", ["core"], indirect=True) +def test_dedicated_compose_multi_node(app_setup, 
connection: Connection): + """Verify that a dedicated cluster can be brought up with 2 nodes.""" + with connection.cursor() as c: + c.execute("SELECT 1") + assert c.fetchone() == [1]