From ced675c3a843c5254740207f6f158e210a1ae449 Mon Sep 17 00:00:00 2001 From: Blake Moore Date: Tue, 21 Apr 2026 14:38:53 +0100 Subject: [PATCH 01/20] add check_snake_case.py lint script and pre-commit hook --- .pre-commit-config.yaml | 8 ++++++++ scripts/check_snake_case.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 scripts/check_snake_case.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 004c2ef0..83169423 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,12 @@ repos: + - repo: local + hooks: + - id: check-snake-case + name: Check snake_case naming + entry: python scripts/check_snake_case.py + language: python + files: ^domino/.*\.py$ + pass_filenames: true - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.0.1 hooks: diff --git a/scripts/check_snake_case.py b/scripts/check_snake_case.py new file mode 100644 index 00000000..ec11ce22 --- /dev/null +++ b/scripts/check_snake_case.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +""" +Check that no camelCase parameter or variable names are introduced in domino/ source. +Usage: python scripts/check_snake_case.py [file ...] 
+""" +import ast +import re +import sys + +CAMEL_RE = re.compile(r"^[a-z][a-z0-9]*[A-Z]") +IGNORE = {"setUp", "tearDown", "setUpClass", "tearDownClass"} + + +def check_file(path: str) -> list[tuple[int, str]]: + violations = [] + with open(path) as f: + tree = ast.parse(f.read(), filename=path) + for node in ast.walk(tree): + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): + for arg in node.args.args + node.args.kwonlyargs: + if CAMEL_RE.match(arg.arg) and arg.arg not in IGNORE: + violations.append((node.lineno, arg.arg)) + return violations + + +if __name__ == "__main__": + files = sys.argv[1:] or [] + found = False + for path in files: + for lineno, name in check_file(path): + print(f"{path}:{lineno}: camelCase parameter '{name}'") + found = True + sys.exit(1 if found else 0) From 866e76cc62828df36805cfc2bc801f608b57adac Mon Sep 17 00:00:00 2001 From: Blake Moore Date: Tue, 21 Apr 2026 14:47:05 +0100 Subject: [PATCH 02/20] rename camelCase parameters to snake_case with deprecation shims --- domino/domino.py | 165 ++++++++++++++++++++++++++++++++-------------- tests/test_app.py | 30 ++++----- 2 files changed, 131 insertions(+), 64 deletions(-) diff --git a/domino/domino.py b/domino/domino.py index 705a651a..489d88f5 100644 --- a/domino/domino.py +++ b/domino/domino.py @@ -112,22 +112,32 @@ def runs_list(self): def runs_start( self, command, - isDirect=False, - commitId=None, + is_direct=False, + commit_id=None, title=None, tier=None, - publishApiEndpoint=None, + publish_api_endpoint=None, + **kwargs, ): + if "isDirect" in kwargs: + warnings.warn("isDirect is deprecated, use is_direct", DeprecationWarning, stacklevel=2) + is_direct = kwargs.pop("isDirect") + if "commitId" in kwargs: + warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2) + commit_id = kwargs.pop("commitId") + if "publishApiEndpoint" in kwargs: + warnings.warn("publishApiEndpoint is deprecated, use publish_api_endpoint", DeprecationWarning, 
stacklevel=2) + publish_api_endpoint = kwargs.pop("publishApiEndpoint") url = self._routes.runs_start() request = { "command": command, - "isDirect": isDirect, - "commitId": commitId, + "isDirect": is_direct, + "commitId": commit_id, "title": title, "tier": tier, - "publishApiEndpoint": publishApiEndpoint, + "publishApiEndpoint": publish_api_endpoint, } try: response = self.request_manager.post(url, json=request) @@ -141,14 +151,15 @@ def runs_start( def runs_start_blocking( self, command, - isDirect=False, - commitId=None, + is_direct=False, + commit_id=None, title=None, tier=None, - publishApiEndpoint=None, + publish_api_endpoint=None, poll_freq=5, max_poll_time=6000, retry_count=5, + **kwargs, ): """ Run a tasks that runs in a blocking loop that periodically checks to @@ -162,12 +173,12 @@ def runs_start_blocking( example: >> domino.runs_start(["main.py", "arg1", "arg2"]) - isDirect : boolean (Optional) + is_direct : boolean (Optional) Whether or not this command should be passed directly to a shell. - commitId : string (Optional) - The commitId to launch from. If not provided, will launch + commit_id : string (Optional) + The commit_id to launch from. If not provided, will launch from latest commit. title : string (Optional) @@ -177,7 +188,7 @@ def runs_start_blocking( The hardware tier to use for the run. Will use project default tier if not provided. - publishApiEndpoint : boolean (Optional) + publish_api_endpoint : boolean (Optional) Whether or not to publish an API endpoint from the resulting output. @@ -195,8 +206,18 @@ def runs_start_blocking( (in-case of transient http errors). If this threshold exceeds, an exception is raised. 
""" + if "isDirect" in kwargs: + warnings.warn("isDirect is deprecated, use is_direct", DeprecationWarning, stacklevel=2) + is_direct = kwargs.pop("isDirect") + if "commitId" in kwargs: + warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2) + commit_id = kwargs.pop("commitId") + if "publishApiEndpoint" in kwargs: + warnings.warn("publishApiEndpoint is deprecated, use publish_api_endpoint", DeprecationWarning, stacklevel=2) + publish_api_endpoint = kwargs.pop("publishApiEndpoint") + run_response = self.runs_start( - command, isDirect, commitId, title, tier, publishApiEndpoint + command, is_direct, commit_id, title, tier, publish_api_endpoint ) run_id = run_response["runId"] @@ -239,7 +260,7 @@ def runs_start_blocking( # once task has finished running check to see if it was successful else: - stdout_msg = self.get_run_log(runId=run_id, includeSetupLog=False) + stdout_msg = self.get_run_log(run_id=run_id, include_setup_log=False) if run_info["status"] != "Succeeded": self.process_log(stdout_msg) @@ -251,31 +272,46 @@ def runs_start_blocking( return run_response - def run_stop(self, runId, saveChanges=True): + def run_stop(self, run_id=None, save_changes=True, **kwargs): + if "runId" in kwargs: + warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2) + run_id = kwargs.pop("runId") + if "saveChanges" in kwargs: + warnings.warn("saveChanges is deprecated, use save_changes", DeprecationWarning, stacklevel=2) + save_changes = kwargs.pop("saveChanges") self.log.warning("Use job_stop method instead") - return self.job_stop(job_id=runId, commit_results=saveChanges) + return self.job_stop(job_id=run_id, commit_results=save_changes) - def runs_status(self, runId): - url = self._routes.runs_status(runId) + def runs_status(self, run_id=None, **kwargs): + if "runId" in kwargs: + warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2) + run_id = kwargs.pop("runId") + url = 
self._routes.runs_status(run_id) return self._get(url) - def get_run_log(self, runId, includeSetupLog=True): + def get_run_log(self, run_id=None, include_setup_log=True, **kwargs): """ Get the unified log for a run (setup + stdout). parameters ---------- - runId : string + run_id : string the id associated with the run. - includeSetupLog : bool + include_setup_log : bool whether or not to include the setup log in the output. """ + if "runId" in kwargs: + warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2) + run_id = kwargs.pop("runId") + if "includeSetupLog" in kwargs: + warnings.warn("includeSetupLog is deprecated, use include_setup_log", DeprecationWarning, stacklevel=2) + include_setup_log = kwargs.pop("includeSetupLog") - url = self._routes.runs_stdout(runId) + url = self._routes.runs_stdout(run_id) logs = list() - if includeSetupLog: + if include_setup_log: logs.append(self._get(url)["setup"]) logs.append(self._get(url)["stdout"]) @@ -287,15 +323,18 @@ def get_run_info(self, run_id): if run_info["id"] == run_id: return run_info - def runs_stdout(self, runId): + def runs_stdout(self, run_id=None, **kwargs): """ Get std out emitted by a particular run. parameters ---------- - runId : string + run_id : string the id associated with the run. """ + if "runId" in kwargs: + warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2) + run_id = kwargs.pop("runId") html_start_tags = ( "
 Optional[str]:
-        if id is None:
+    def __app_get_status(self, app_id) -> Optional[str]:
+        if app_id is None:
             return None
-        url = self._routes.app_get(id)
+        url = self._routes.app_get(app_id)
         response = self.request_manager.get(url).json()
         return response.get("status", None)
 
diff --git a/tests/test_app.py b/tests/test_app.py
index a3284b93..7b84656d 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -10,7 +10,7 @@
 @pytest.fixture
 def mock_app_publish_setup(requests_mock, dummy_hostname):
     """
-    Mocks all API calls that app_publish() depends on when an appId is provided.
+    Mocks all API calls that app_publish() depends on when an app_id is provided.
 
     If any dependent calls are added to app_publish or app_unpublish, they
     must be mocked here as well.
@@ -44,7 +44,7 @@ def test_app_publish_with_branch(requests_mock, dummy_hostname):
     in the app start request body.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID, branch="my-feature-branch")
+    d.app_publish(app_id=MOCK_APP_ID, branch="my-feature-branch")
 
     app_start_request = next(
         req for req in requests_mock.request_history
@@ -59,11 +59,11 @@ def test_app_publish_with_branch(requests_mock, dummy_hostname):
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_app_publish_setup")
 def test_app_publish_with_commit_id(requests_mock, dummy_hostname):
     """
-    Confirm that the commitId parameter is correctly formatted as mainRepoGitRef
+    Confirm that the commit_id parameter is correctly formatted as mainRepoGitRef
     in the app start request body.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID, commitId="abc123def456")
+    d.app_publish(app_id=MOCK_APP_ID, commit_id="abc123def456")
 
     app_start_request = next(
         req for req in requests_mock.request_history
@@ -79,10 +79,10 @@ def test_app_publish_with_commit_id(requests_mock, dummy_hostname):
 def test_app_publish_omits_git_ref_when_not_provided(requests_mock, dummy_hostname):
     """
     Confirm that mainRepoGitRef is omitted from the request body when neither
-    branch nor commitId is provided.
+    branch nor commit_id is provided.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID)
+    d.app_publish(app_id=MOCK_APP_ID)
 
     app_start_request = next(
         req for req in requests_mock.request_history
@@ -94,21 +94,21 @@ def test_app_publish_omits_git_ref_when_not_provided(requests_mock, dummy_hostna
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_app_publish_setup")
 def test_app_publish_raises_if_both_branch_and_commit_id_provided(dummy_hostname):
     """
-    Confirm that providing both branch and commitId raises a ValueError.
+    Confirm that providing both branch and commit_id raises a ValueError.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    with pytest.raises(ValueError, match="Only one of commitId or branch"):
-        d.app_publish(appId=MOCK_APP_ID, branch="my-branch", commitId="abc123")
+    with pytest.raises(ValueError, match="Only one of commit_id or branch"):
+        d.app_publish(app_id=MOCK_APP_ID, branch="my-branch", commit_id="abc123")
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_app_publish_setup")
 def test_app_publish_unpublishes_running_app(requests_mock, dummy_hostname):
     """
     Confirm that app_publish calls stop on the running app before starting
-    when unpublishRunningApps=True (the default).
+    when unpublish_running_apps=True (the default).
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID, unpublishRunningApps=True)
+    d.app_publish(app_id=MOCK_APP_ID, unpublish_running_apps=True)
 
     stop_requests = [
         req for req in requests_mock.request_history
@@ -120,10 +120,10 @@ def test_app_publish_unpublishes_running_app(requests_mock, dummy_hostname):
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_app_publish_setup")
 def test_app_publish_skips_unpublish_when_disabled(requests_mock, dummy_hostname):
     """
-    Confirm that app_publish does not call stop when unpublishRunningApps=False.
+    Confirm that app_publish does not call stop when unpublish_running_apps=False.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID, unpublishRunningApps=False)
+    d.app_publish(app_id=MOCK_APP_ID, unpublish_running_apps=False)
 
     stop_requests = [
         req for req in requests_mock.request_history
@@ -135,11 +135,11 @@ def test_app_publish_skips_unpublish_when_disabled(requests_mock, dummy_hostname
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_app_publish_setup")
 def test_app_publish_targets_specific_app_id(requests_mock, dummy_hostname):
     """
-    Confirm that the provided appId is used in the start endpoint URL, not the
+    Confirm that the provided app_id is used in the start endpoint URL, not the
     default first-app lookup.
     """
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    d.app_publish(appId=MOCK_APP_ID)
+    d.app_publish(app_id=MOCK_APP_ID)
 
     start_requests = [
         req for req in requests_mock.request_history

From e04dd07f700ade0034be401a11d7fe621e334f37 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 15:24:35 +0100
Subject: [PATCH 03/20] update docstring to use commit_id instead of commitId

---
 domino/domino.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/domino/domino.py b/domino/domino.py
index 489d88f5..ee26c6c3 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -374,7 +374,7 @@ def job_start(
                                                     Command to execute in Job
                                                     >> domino.job_start(command="main.py arg1 arg2")
         :param commit_id:                           string (Optional)
-                                                    The commitId to launch from. If not provided, will launch
+                                                    The commit_id to launch from. If not provided, will launch
                                                     from latest commit.
         :param hardware_tier_id:                    string (Optional)
                                                     The hardware tier ID to launch job in. If not provided

From 69cc3967824a91b432ea0feea566f3766edf9b11 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 15:58:30 +0100
Subject: [PATCH 04/20] add deprecation warning tests for renamed camelCase
 parameters

---
 domino/domino.py           |   1 +
 tests/test_deprecations.py | 215 +++++++++++++++++++++++++++++++++++++
 2 files changed, 216 insertions(+)
 create mode 100644 tests/test_deprecations.py

diff --git a/domino/domino.py b/domino/domino.py
index ee26c6c3..5ea84769 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -29,6 +29,7 @@
 from domino._custom_metrics import _CustomMetricsClientBase, _CustomMetricsClientGen, _CustomMetricsClient
 
 
+
 class Domino:
     def __init__(
         self, project, api_key=None, host=None, domino_token_file=None, auth_token=None, api_proxy=None,
diff --git a/tests/test_deprecations.py b/tests/test_deprecations.py
new file mode 100644
index 00000000..b3acf8b6
--- /dev/null
+++ b/tests/test_deprecations.py
@@ -0,0 +1,215 @@
+"""
+Tests confirming that renamed camelCase parameters emit DeprecationWarning
+and that the call still succeeds with the old name.
+"""
+import pytest
+
+from domino import Domino
+
+MOCK_PROJECT_ID = "aabbccddeeff001122334454"
+MOCK_RUN_ID = "aabbccddeeff001122334455"
+MOCK_APP_ID = "aabbccddeeff001122334457"
+MOCK_COMMIT_ID = "aabbcc112233"
+
+
+@pytest.fixture
+def client(requests_mock, dummy_hostname):
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    return Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+
+
+# ---------------------------------------------------------------------------
+# runs_start
+# ---------------------------------------------------------------------------
+
+class TestRunsStartDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_runs_start(self, requests_mock, dummy_hostname):
+        requests_mock.post(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs",
+            json={"runId": MOCK_RUN_ID},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_isDirect_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="isDirect is deprecated"):
+            client.runs_start("main.py", isDirect=True)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_commitId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="commitId is deprecated"):
+            client.runs_start("main.py", commitId=MOCK_COMMIT_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_publishApiEndpoint_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="publishApiEndpoint is deprecated"):
+            client.runs_start("main.py", publishApiEndpoint=True)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_new_names_accepted_without_warning(self, client, recwarn):
+        client.runs_start("main.py", is_direct=True, commit_id=MOCK_COMMIT_ID)
+        deprecation_warnings = [w for w in recwarn.list if issubclass(w.category, DeprecationWarning)
+                                 and "deprecated" in str(w.message).lower()
+                                 and any(x in str(w.message) for x in ["isDirect", "commitId"])]
+        assert len(deprecation_warnings) == 0
+
+
+# ---------------------------------------------------------------------------
+# runs_start_blocking (spot-check one param — same shim logic)
+# ---------------------------------------------------------------------------
+
+class TestRunsStartBlockingDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_endpoints(self, requests_mock, dummy_hostname):
+        requests_mock.post(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs",
+            json={"runId": MOCK_RUN_ID, "outputCommitId": MOCK_COMMIT_ID, "status": "Succeeded"},
+        )
+        requests_mock.get(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs",
+            json={"data": [{"id": MOCK_RUN_ID, "outputCommitId": MOCK_COMMIT_ID, "status": "Succeeded"}]},
+        )
+        requests_mock.get(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/run/{MOCK_RUN_ID}/stdout",
+            json={"setup": "", "stdout": "done"},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_isDirect_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="isDirect is deprecated"):
+            client.runs_start_blocking("main.py", isDirect=True, poll_freq=1, max_poll_time=5)
+
+
+# ---------------------------------------------------------------------------
+# run_stop / runs_status / get_run_log / runs_stdout
+# ---------------------------------------------------------------------------
+
+class TestLegacyRunMethodDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_run_endpoints(self, requests_mock, dummy_hostname):
+        requests_mock.get(
+            f"{dummy_hostname}/v4/gateway/projects/findProjectByOwnerAndName"
+            "?ownerName=anyuser&projectName=anyproject",
+            json={"id": MOCK_PROJECT_ID},
+        )
+        requests_mock.post(f"{dummy_hostname}/v4/jobs/stop", json={})
+        requests_mock.get(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs/{MOCK_RUN_ID}",
+            json={"id": MOCK_RUN_ID, "status": "Succeeded"},
+        )
+        requests_mock.get(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/run/{MOCK_RUN_ID}/stdout",
+            json={"setup": "setup log", "stdout": "hello"},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_run_stop_runId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="runId is deprecated"):
+            client.run_stop(runId=MOCK_RUN_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_run_stop_saveChanges_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="saveChanges is deprecated"):
+            client.run_stop(run_id=MOCK_RUN_ID, saveChanges=False)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_runs_status_runId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="runId is deprecated"):
+            client.runs_status(runId=MOCK_RUN_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_get_run_log_runId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="runId is deprecated"):
+            client.get_run_log(runId=MOCK_RUN_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_get_run_log_includeSetupLog_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="includeSetupLog is deprecated"):
+            client.get_run_log(run_id=MOCK_RUN_ID, includeSetupLog=False)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_runs_stdout_runId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="runId is deprecated"):
+            client.runs_stdout(runId=MOCK_RUN_ID)
+
+
+# ---------------------------------------------------------------------------
+# files_list
+# ---------------------------------------------------------------------------
+
+class TestFilesListDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_files_list(self, requests_mock, dummy_hostname):
+        requests_mock.get(
+            f"{dummy_hostname}/v1/projects/anyuser/anyproject/files/{MOCK_COMMIT_ID}//",
+            json={"data": []},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_commitId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="commitId is deprecated"):
+            client.files_list(commitId=MOCK_COMMIT_ID)
+
+
+# ---------------------------------------------------------------------------
+# endpoint_publish
+# ---------------------------------------------------------------------------
+
+class TestEndpointPublishDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_endpoint_publish(self, requests_mock, dummy_hostname):
+        requests_mock.post(
+            f"{dummy_hostname}/v1/anyuser/anyproject/endpoint/publishRelease",
+            json={},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_commitId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="commitId is deprecated"):
+            client.endpoint_publish("app.py", "predict", commitId=MOCK_COMMIT_ID)
+
+
+# ---------------------------------------------------------------------------
+# app_publish / app_unpublish
+# ---------------------------------------------------------------------------
+
+class TestAppDeprecations:
+    @pytest.fixture(autouse=True)
+    def mock_app_endpoints(self, requests_mock, dummy_hostname):
+        requests_mock.get(
+            f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}",
+            json={"id": MOCK_APP_ID, "status": "Stopped"},
+        )
+        requests_mock.post(
+            f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}/stop",
+            json={},
+        )
+        requests_mock.post(
+            f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}/start",
+            json={"id": MOCK_APP_ID, "status": "Running"},
+        )
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_app_publish_appId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="appId is deprecated"):
+            client.app_publish(appId=MOCK_APP_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_app_publish_unpublishRunningApps_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="unpublishRunningApps is deprecated"):
+            client.app_publish(app_id=MOCK_APP_ID, unpublishRunningApps=True)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_app_publish_commitId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="commitId is deprecated"):
+            client.app_publish(app_id=MOCK_APP_ID, commitId=MOCK_COMMIT_ID)
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_app_publish_environmentId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="environmentId is deprecated"):
+            client.app_publish(app_id=MOCK_APP_ID, environmentId="env-123")
+
+    @pytest.mark.usefixtures("clear_token_file_from_env")
+    def test_app_unpublish_appId_warns(self, client):
+        with pytest.warns(DeprecationWarning, match="appId is deprecated"):
+            client.app_unpublish(appId=MOCK_APP_ID)

From a2b073903e8bd6579ac659ff87117c4789752316 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 16:06:31 +0100
Subject: [PATCH 05/20] Updated CHANGELOG

---
 CHANGELOG.md | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c501eb9..dd18ecdd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,32 @@ All notable changes to the `python-domino` library will be documented in this fi
 * Updated app_publish() to allow selecting branch/commitRef
 * Updated app_publish() to allow selecing specific app
 
+### Deprecated
+The following public API parameters have been renamed to follow PEP 8 (`snake_case`).
+The old names continue to work but will emit a `DeprecationWarning`. They will be
+removed in the next major version.
+
+| Method | Old name | New name |
+|--------|----------|----------|
+| `runs_start`, `runs_start_blocking` | `isDirect` | `is_direct` |
+| `runs_start`, `runs_start_blocking` | `commitId` | `commit_id` |
+| `runs_start`, `runs_start_blocking` | `publishApiEndpoint` | `publish_api_endpoint` |
+| `run_stop` | `runId` | `run_id` |
+| `run_stop` | `saveChanges` | `save_changes` |
+| `runs_status` | `runId` | `run_id` |
+| `get_run_log` | `runId` | `run_id` |
+| `get_run_log` | `includeSetupLog` | `include_setup_log` |
+| `runs_stdout` | `runId` | `run_id` |
+| `files_list` | `commitId` | `commit_id` |
+| `endpoint_publish` | `commitId` | `commit_id` |
+| `app_publish` | `unpublishRunningApps` | `unpublish_running_apps` |
+| `app_publish` | `hardwareTierId` | `hardware_tier_id` |
+| `app_publish` | `environmentId` | `environment_id` |
+| `app_publish` | `externalVolumeMountIds` | `external_volume_mount_ids` |
+| `app_publish` | `commitId` | `commit_id` |
+| `app_publish` | `appId` | `app_id` |
+| `app_unpublish` | `appId` | `app_id` |
+
 ### Changed
 
 ## [2.1.0]

From 7bbc93dbf750d4fcb866a332183e67cbc415717f Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 16:11:42 +0100
Subject: [PATCH 06/20] Updated flake8 config so that it works

---
 .flake8 | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/.flake8 b/.flake8
index e1e9871f..1cfb3043 100644
--- a/.flake8
+++ b/.flake8
@@ -1,14 +1,23 @@
 [flake8]
 max-complexity = 33
-# Default plus E266 - There should be only one leading # for a block comment
 ignore =
-    E121,  # A line is less indented than it should be for hanging indents
-    E123,  # Closing brackets should match the same indentation level of the line that their opening bracket started on
-    E126,  # A continuation line is indented farther than it should be for a hanging indent
-    E226,  # There should be one space before and after an arithmetic operator (+, -, /, and *)
-    E704,  # Multiple statements of a function definition should be on their own separate lines
-    W503,  # Line breaks should occur after the binary operator to keep all variable names aligned
-    W504,  # Line breaks should occur before the binary operator to keep all operators aligned
-    E266,  # There should be only one leading # for a block comment
-    E203,  # Colons should not have any space before them
-    E501,  # Line lengths are recommended to be no greater than 79 characters
+    # A line is less indented than it should be for hanging indents
+    E121,
+    # Closing brackets should match the same indentation level
+    E123,
+    # A continuation line is indented farther than it should be
+    E126,
+    # There should be one space before and after an arithmetic operator
+    E226,
+    # Multiple statements of a function definition should be on their own lines
+    E704,
+    # Line breaks should occur after the binary operator
+    W503,
+    # Line breaks should occur before the binary operator
+    W504,
+    # There should be only one leading # for a block comment
+    E266,
+    # Colons should not have any space before them
+    E203,
+    # Line lengths are recommended to be no greater than 79 characters
+    E501,

From 06c037fb48d880879d0f380f0878dd8b32d35646 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 16:31:13 +0100
Subject: [PATCH 07/20] pep8/ruff: fix some inconsistencies

---
 docs/source/conf.py                           |    2 +-
 domino/_custom_metrics.py                     |   14 +-
 domino/_impl/custommetrics/apis/__init__.py   |    2 +-
 domino/_impl/custommetrics/configuration.py   |    1 +
 .../model/failure_envelope_v1.py              |   33 +-
 .../model/invalid_body_envelope_v1.py         |   19 +-
 .../_impl/custommetrics/model/metadata_v1.py  |   33 +-
 .../model/metric_alert_request_v1.py          |   40 +-
 .../custommetrics/model/metric_tag_v1.py      |   23 +-
 .../custommetrics/model/metric_value_v1.py    |   42 +-
 .../model/metric_values_envelope_v1.py        |   42 +-
 .../model/new_metric_value_v1.py              |   50 +-
 .../model/new_metric_values_envelope_v1.py    |   34 +-
 .../custommetrics/model/target_range_v1.py    |   43 +-
 .../paths/api_metric_alerts_v1/__init__.py    |    2 +-
 .../paths/api_metric_alerts_v1/post.py        |   10 +-
 .../paths/api_metric_alerts_v1/post.pyi       |    1 -
 .../paths/api_metric_values_v1/__init__.py    |    2 +-
 .../paths/api_metric_values_v1/post.py        |   10 +-
 .../paths/api_metric_values_v1/post.pyi       |    1 -
 .../__init__.py                               |    2 +-
 .../get.py                                    |    7 +-
 .../get.pyi                                   |    1 -
 domino/_impl/custommetrics/rest.py            |    1 -
 domino/_impl/custommetrics/schemas.py         |  135 +-
 domino/agents/logging/dominorun.py            |    1 +
 domino/agents/logging/logging.py              |    8 +-
 domino/agents/read_agent_config.py            |    6 +-
 domino/agents/tracing/tracing.py              |    8 +-
 domino/authentication.py                      |    1 -
 domino/datasets.py                            |    4 +-
 domino/domino.py                              |   63 +-
 domino/exceptions.py                          |    2 +
 domino/http_request_manager.py                |    3 +-
 domino/routes.py                              |    6 +-
 examples/example_budget_manager.py            |    2 +-
 setup.py                                      |    8 +-
 .../test_models/test_failure_envelope_v1.py   |    2 -
 .../test_invalid_body_envelope_v1.py          |    2 -
 .../test_models/test_metadata_v1.py           |    2 -
 .../test_metric_alert_request_v1.py           |    2 -
 .../test_models/test_metric_tag_v1.py         |    2 -
 .../test_models/test_metric_value_v1.py       |    2 -
 .../test_metric_values_envelope_v1.py         |    2 -
 .../test_models/test_new_metric_value_v1.py   |    2 -
 .../test_new_metric_values_envelope_v1.py     |    2 -
 .../test_models/test_target_range_v1.py       |    2 -
 .../test_api_metric_alerts_v1/test_post.py    |    7 +-
 .../test_api_metric_values_v1/test_post.py    |    7 +-
 .../test_get.py                               |    7 +-
 tests/agents/test_agents_eval_tags.py         |    5 +-
 tests/agents/test_read_agent_config.py        |   59 +-
 tests/agents/test_verify_domino_support.py    |  103 +-
 tests/conftest.py                             |    3 +-
 tests/integration/agents/conftest.py          |  136 +-
 tests/integration/agents/mlflow_fixtures.py   |   97 +-
 tests/integration/agents/test_domino_run.py   |  557 +++---
 tests/integration/agents/test_logging.py      |  115 +-
 tests/integration/agents/test_tracing.py      | 1569 +++++++++--------
 tests/integration/agents/test_util.py         |    6 +-
 tests/test_basic_auth.py                      |    1 +
 tests/test_datasets.py                        |    6 +-
 tests/test_deprecations.py                    |    4 +-
 tests/test_domino.py                          |   29 +-
 tests/test_spark_operator.py                  |    5 +-
 65 files changed, 1739 insertions(+), 1659 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 83897642..c4f684d0 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -15,7 +15,7 @@
 
 autodoc_default_options = {
     'members': True,
-    'undoc-members': False, # Don't show undocumented members
+    'undoc-members': False,  # Don't show undocumented members
     'show-inheritance': False,
 }
 
diff --git a/domino/_custom_metrics.py b/domino/_custom_metrics.py
index 5da5a0bc..4e1f3745 100644
--- a/domino/_custom_metrics.py
+++ b/domino/_custom_metrics.py
@@ -17,15 +17,9 @@
 from ._impl.custommetrics.paths.api_metric_values_v1.post import (
     request_body_new_metric_values_envelope_v1,
 )
-from ._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric.get import (
-    _response_for_200,
-    ApiResponseFor200,
-)
 from ._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
-from ._impl.custommetrics.model.metadata_v1 import MetadataV1
 from ._impl.custommetrics.api_client import SerializedRequestBody
 from ._impl.custommetrics import schemas
-from ._impl.custommetrics import configuration
 
 
 class _CustomMetricsClientBase(ABC):
@@ -106,14 +100,14 @@ def trigger_alert(
             modelMonitoringId=model_monitoring_id,
             metric=metric,
             value=value,
-            targetRange=target_range, # type: ignore
+            targetRange=target_range,  # type: ignore
             description=description,
         )
         ser_body: SerializedRequestBody = (
             request_body_metric_alert_request_v1.serialize(req, "application/json")
         )
         json_data = json.loads(
-            ser_body["body"].decode("utf-8") # type: ignore
+            ser_body["body"].decode("utf-8")  # type: ignore
         )  # extra work to reuse request_manager
         self._parent.request_manager.post(url, json=json_data)
 
@@ -136,7 +130,7 @@ def log_metric(
         self.log_metrics([item])
 
     def _to_new_metric_value(self, item: Dict) -> NewMetricValueV1:
-        tags: Union[List[MetricTagV1],schemas.Unset] = schemas.unset
+        tags: Union[List[MetricTagV1], schemas.Unset] = schemas.unset
         if "tags" in item:
             tags = [MetricTagV1(key=k, value=v) for k, v in item["tags"].items()]
         ret = NewMetricValueV1(
@@ -158,7 +152,7 @@ def log_metrics(self, metric_values_array: List) -> None:
             )
         )
         json_data = json.loads(
-            ser_body["body"].decode("utf-8") # type: ignore
+            ser_body["body"].decode("utf-8")  # type: ignore
         )  # extra work to reuse request_manager
         self._parent.request_manager.post(url, json=json_data)
 
diff --git a/domino/_impl/custommetrics/apis/__init__.py b/domino/_impl/custommetrics/apis/__init__.py
index 7840f772..5ca66b80 100644
--- a/domino/_impl/custommetrics/apis/__init__.py
+++ b/domino/_impl/custommetrics/apis/__init__.py
@@ -1,3 +1,3 @@
 # do not import all endpoints into this module because that uses a lot of memory and stack frames
 # if you need the ability to import all endpoints then import them from
-# tags, paths, or path_to_api, or tag_to_api
\ No newline at end of file
+# tags, paths, or path_to_api, or tag_to_api
diff --git a/domino/_impl/custommetrics/configuration.py b/domino/_impl/custommetrics/configuration.py
index 14083c14..59086c30 100644
--- a/domino/_impl/custommetrics/configuration.py
+++ b/domino/_impl/custommetrics/configuration.py
@@ -26,6 +26,7 @@
     'uniqueItems', 'maxProperties', 'minProperties',
 }
 
+
 class Configuration(object):
     """NOTE: This class is auto generated by OpenAPI Generator
 
diff --git a/domino/_impl/custommetrics/model/failure_envelope_v1.py b/domino/_impl/custommetrics/model/failure_envelope_v1.py
index 4bf16844..f2d8d63b 100644
--- a/domino/_impl/custommetrics/model/failure_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/failure_envelope_v1.py
@@ -32,25 +32,22 @@ class FailureEnvelopeV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "requestId",
             "errors",
         }
-        
+
         class properties:
             requestId = schemas.StrSchema
-            
-            
+
             class errors(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
                     items = schemas.StrSchema
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
@@ -61,43 +58,41 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
             __annotations__ = {
                 "requestId": requestId,
                 "errors": errors,
             }
-    
+
     requestId: MetaOapg.properties.requestId
     errors: MetaOapg.properties.errors
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
diff --git a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
index 72bdcfd6..f924b41a 100644
--- a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
@@ -32,40 +32,37 @@ class InvalidBodyEnvelopeV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "message",
         }
-        
+
         class properties:
             message = schemas.StrSchema
             __annotations__ = {
                 "message": message,
             }
-    
+
     message: MetaOapg.properties.message
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
diff --git a/domino/_impl/custommetrics/model/metadata_v1.py b/domino/_impl/custommetrics/model/metadata_v1.py
index 7ad772c5..be027e44 100644
--- a/domino/_impl/custommetrics/model/metadata_v1.py
+++ b/domino/_impl/custommetrics/model/metadata_v1.py
@@ -32,25 +32,22 @@ class MetadataV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "notices",
             "requestId",
         }
-        
+
         class properties:
             requestId = schemas.StrSchema
-            
-            
+
             class notices(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
                     items = schemas.StrSchema
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
@@ -61,43 +58,41 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
             __annotations__ = {
                 "requestId": requestId,
                 "notices": notices,
             }
-    
+
     notices: MetaOapg.properties.notices
     requestId: MetaOapg.properties.requestId
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
diff --git a/domino/_impl/custommetrics/model/metric_alert_request_v1.py b/domino/_impl/custommetrics/model/metric_alert_request_v1.py
index b8e209b0..22a8c0bc 100644
--- a/domino/_impl/custommetrics/model/metric_alert_request_v1.py
+++ b/domino/_impl/custommetrics/model/metric_alert_request_v1.py
@@ -9,6 +9,7 @@
     Generated by: https://openapi-generator.tech
 """
 
+from .target_range_v1 import TargetRangeV1
 from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
@@ -32,7 +33,6 @@ class MetricAlertRequestV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "metric",
@@ -40,12 +40,12 @@ class MetaOapg:
             "targetRange",
             "value",
         }
-        
+
         class properties:
             modelMonitoringId = schemas.StrSchema
             metric = schemas.StrSchema
             value = schemas.NumberSchema
-        
+
             @staticmethod
             def targetRange() -> typing.Type['TargetRangeV1']:
                 return TargetRangeV1
@@ -57,56 +57,54 @@ def targetRange() -> typing.Type['TargetRangeV1']:
                 "targetRange": targetRange,
                 "description": description,
             }
-    
+
     metric: MetaOapg.properties.metric
     modelMonitoringId: MetaOapg.properties.modelMonitoringId
     targetRange: 'TargetRangeV1'
     value: MetaOapg.properties.value
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
@@ -130,5 +128,3 @@ def __new__(
             _configuration=_configuration,
             **kwargs,
         )
-
-from .target_range_v1 import TargetRangeV1
diff --git a/domino/_impl/custommetrics/model/metric_tag_v1.py b/domino/_impl/custommetrics/model/metric_tag_v1.py
index a954d441..93bc19a4 100644
--- a/domino/_impl/custommetrics/model/metric_tag_v1.py
+++ b/domino/_impl/custommetrics/model/metric_tag_v1.py
@@ -32,13 +32,12 @@ class MetricTagV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "value",
             "key",
         }
-        
+
         class properties:
             key = schemas.StrSchema
             value = schemas.StrSchema
@@ -46,36 +45,34 @@ class properties:
                 "key": key,
                 "value": value,
             }
-    
+
     value: MetaOapg.properties.value
     key: MetaOapg.properties.key
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
diff --git a/domino/_impl/custommetrics/model/metric_value_v1.py b/domino/_impl/custommetrics/model/metric_value_v1.py
index c9d571db..ef8cc3fe 100644
--- a/domino/_impl/custommetrics/model/metric_value_v1.py
+++ b/domino/_impl/custommetrics/model/metric_value_v1.py
@@ -9,6 +9,7 @@
     Generated by: https://openapi-generator.tech
 """
 
+from .metric_tag_v1 import MetricTagV1
 from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
@@ -32,30 +33,27 @@ class MetricValueV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "referenceTimestamp",
             "value",
             "tags",
         }
-        
+
         class properties:
             referenceTimestamp = schemas.StrSchema
             value = schemas.NumberSchema
-            
-            
+
             class tags(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
-                    
+
                     @staticmethod
                     def items() -> typing.Type['MetricTagV1']:
                         return MetricTagV1
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
@@ -66,7 +64,7 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> 'MetricTagV1':
                     return super().__getitem__(i)
             __annotations__ = {
@@ -74,43 +72,41 @@ def __getitem__(self, i: int) -> 'MetricTagV1':
                 "value": value,
                 "tags": tags,
             }
-    
+
     referenceTimestamp: MetaOapg.properties.referenceTimestamp
     value: MetaOapg.properties.value
     tags: MetaOapg.properties.tags
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
@@ -130,5 +126,3 @@ def __new__(
             _configuration=_configuration,
             **kwargs,
         )
-
-from .metric_tag_v1 import MetricTagV1
diff --git a/domino/_impl/custommetrics/model/metric_values_envelope_v1.py b/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
index 5a7c61f2..2f410b9c 100644
--- a/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
@@ -9,6 +9,8 @@
     Generated by: https://openapi-generator.tech
 """
 
+from .metadata_v1 import MetadataV1
+from .metric_value_v1 import MetricValueV1
 from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
@@ -32,27 +34,24 @@ class MetricValuesEnvelopeV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "metadata",
             "metricValues",
         }
-        
+
         class properties:
-            
-            
+
             class metricValues(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
-                    
+
                     @staticmethod
                     def items() -> typing.Type['MetricValueV1']:
                         return MetricValueV1
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple['MetricValueV1'], typing.List['MetricValueV1']],
@@ -63,10 +62,10 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> 'MetricValueV1':
                     return super().__getitem__(i)
-        
+
             @staticmethod
             def metadata() -> typing.Type['MetadataV1']:
                 return MetadataV1
@@ -74,36 +73,34 @@ def metadata() -> typing.Type['MetadataV1']:
                 "metricValues": metricValues,
                 "metadata": metadata,
             }
-    
+
     metadata: 'MetadataV1'
     metricValues: MetaOapg.properties.metricValues
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
@@ -121,6 +118,3 @@ def __new__(
             _configuration=_configuration,
             **kwargs,
         )
-
-from .metadata_v1 import MetadataV1
-from .metric_value_v1 import MetricValueV1
diff --git a/domino/_impl/custommetrics/model/new_metric_value_v1.py b/domino/_impl/custommetrics/model/new_metric_value_v1.py
index 985a5f1e..de20f9f4 100644
--- a/domino/_impl/custommetrics/model/new_metric_value_v1.py
+++ b/domino/_impl/custommetrics/model/new_metric_value_v1.py
@@ -9,6 +9,7 @@
     Generated by: https://openapi-generator.tech
 """
 
+from .metric_tag_v1 import MetricTagV1
 from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
@@ -32,7 +33,6 @@ class NewMetricValueV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "referenceTimestamp",
@@ -40,25 +40,23 @@ class MetaOapg:
             "modelMonitoringId",
             "value",
         }
-        
+
         class properties:
             modelMonitoringId = schemas.StrSchema
             metric = schemas.StrSchema
             value = schemas.NumberSchema
             referenceTimestamp = schemas.StrSchema
-            
-            
+
             class tags(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
-                    
+
                     @staticmethod
                     def items() -> typing.Type['MetricTagV1']:
                         return MetricTagV1
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
@@ -69,7 +67,7 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> 'MetricTagV1':
                     return super().__getitem__(i)
             __annotations__ = {
@@ -79,56 +77,54 @@ def __getitem__(self, i: int) -> 'MetricTagV1':
                 "referenceTimestamp": referenceTimestamp,
                 "tags": tags,
             }
-    
+
     referenceTimestamp: MetaOapg.properties.referenceTimestamp
     metric: MetaOapg.properties.metric
     modelMonitoringId: MetaOapg.properties.modelMonitoringId
     value: MetaOapg.properties.value
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> typing.Union[MetaOapg.properties.tags, schemas.Unset]: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
@@ -152,5 +148,3 @@ def __new__(
             _configuration=_configuration,
             **kwargs,
         )
-
-from .metric_tag_v1 import MetricTagV1
diff --git a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
index b4b0c5ab..4dfa62b3 100644
--- a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
@@ -9,6 +9,7 @@
     Generated by: https://openapi-generator.tech
 """
 
+from .new_metric_value_v1 import NewMetricValueV1
 from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
@@ -32,26 +33,23 @@ class NewMetricValuesEnvelopeV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "newMetricValues",
         }
-        
+
         class properties:
-            
-            
+
             class newMetricValues(
                 schemas.ListSchema
             ):
-            
-            
+
                 class MetaOapg:
-                    
+
                     @staticmethod
                     def items() -> typing.Type['NewMetricValueV1']:
                         return NewMetricValueV1
-            
+
                 def __new__(
                     cls,
                     arg: typing.Union[typing.Tuple['NewMetricValueV1'], typing.List['NewMetricValueV1']],
@@ -62,35 +60,33 @@ def __new__(
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> 'NewMetricValueV1':
                     return super().__getitem__(i)
             __annotations__ = {
                 "newMetricValues": newMetricValues,
             }
-    
+
     newMetricValues: MetaOapg.properties.newMetricValues
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
@@ -106,5 +102,3 @@ def __new__(
             _configuration=_configuration,
             **kwargs,
         )
-
-from .new_metric_value_v1 import NewMetricValueV1
diff --git a/domino/_impl/custommetrics/model/target_range_v1.py b/domino/_impl/custommetrics/model/target_range_v1.py
index b565481b..25457ad4 100644
--- a/domino/_impl/custommetrics/model/target_range_v1.py
+++ b/domino/_impl/custommetrics/model/target_range_v1.py
@@ -32,21 +32,18 @@ class TargetRangeV1(
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "condition",
         }
-        
+
         class properties:
-            
-            
+
             class condition(
                 schemas.EnumBase,
                 schemas.StrSchema
             ):
-            
-            
+
                 class MetaOapg:
                     enum_value_to_name = {
                         "lessThan": "LESS_THAN",
@@ -55,23 +52,23 @@ class MetaOapg:
                         "greaterThanEqual": "GREATER_THAN_EQUAL",
                         "between": "BETWEEN",
                     }
-                
+
                 @schemas.classproperty
                 def LESS_THAN(cls):
                     return cls("lessThan")
-                
+
                 @schemas.classproperty
                 def LESS_THAN_EQUAL(cls):
                     return cls("lessThanEqual")
-                
+
                 @schemas.classproperty
                 def GREATER_THAN(cls):
                     return cls("greaterThan")
-                
+
                 @schemas.classproperty
                 def GREATER_THAN_EQUAL(cls):
                     return cls("greaterThanEqual")
-                
+
                 @schemas.classproperty
                 def BETWEEN(cls):
                     return cls("between")
@@ -82,41 +79,39 @@ def BETWEEN(cls):
                 "lowerLimit": lowerLimit,
                 "upperLimit": upperLimit,
             }
-    
+
     condition: MetaOapg.properties.condition
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["lowerLimit"]) -> MetaOapg.properties.lowerLimit: ...
-    
+
     @typing.overload
     def __getitem__(self, name: typing_extensions.Literal["upperLimit"]) -> MetaOapg.properties.upperLimit: ...
-    
+
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
+
     def __getitem__(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["lowerLimit"]) -> typing.Union[MetaOapg.properties.lowerLimit, schemas.Unset]: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: typing_extensions.Literal["upperLimit"]) -> typing.Union[MetaOapg.properties.upperLimit, schemas.Unset]: ...
-    
+
     @typing.overload
     def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
+
     def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
diff --git a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/__init__.py b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/__init__.py
index dc46fd88..3ccc1d50 100644
--- a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/__init__.py
+++ b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/__init__.py
@@ -4,4 +4,4 @@
 
 from domino._impl.custommetrics.paths import PathValues
 
-path = PathValues.API_METRIC_ALERTS_V1
\ No newline at end of file
+path = PathValues.API_METRIC_ALERTS_V1
diff --git a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
index b2eeffc8..f820cafd 100644
--- a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
+++ b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
@@ -18,7 +18,6 @@
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
@@ -60,9 +59,8 @@ class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
 
-
     class MetaOapg:
-        
+
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -78,7 +76,6 @@ def one_of(cls):
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
         *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
@@ -225,7 +222,6 @@ def _send_metric_alert_oapg(
         ApiResponseFor200,
     ]: ...
 
-
     @typing.overload
     def _send_metric_alert_oapg(
         self,
@@ -339,7 +335,6 @@ def send_metric_alert(
         ApiResponseFor200,
     ]: ...
 
-
     @typing.overload
     def send_metric_alert(
         self,
@@ -413,7 +408,6 @@ def post(
         ApiResponseFor200,
     ]: ...
 
-
     @typing.overload
     def post(
         self,
@@ -456,5 +450,3 @@ def post(
             timeout=timeout,
             skip_deserialization=skip_deserialization
         )
-
-
diff --git a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
index dcc2faef..7c889cbe 100644
--- a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
@@ -18,7 +18,6 @@ import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1/__init__.py b/domino/_impl/custommetrics/paths/api_metric_values_v1/__init__.py
index 7606b9e3..edbd0590 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1/__init__.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1/__init__.py
@@ -4,4 +4,4 @@
 
 from domino._impl.custommetrics.paths import PathValues
 
-path = PathValues.API_METRIC_VALUES_V1
\ No newline at end of file
+path = PathValues.API_METRIC_VALUES_V1
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
index d3754efd..01d4ffe0 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
@@ -18,7 +18,6 @@
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
@@ -60,9 +59,8 @@ class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
 
-
     class MetaOapg:
-        
+
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -78,7 +76,6 @@ def one_of(cls):
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
         *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
@@ -225,7 +222,6 @@ def _log_metric_values_oapg(
         ApiResponseFor201,
     ]: ...
 
-
     @typing.overload
     def _log_metric_values_oapg(
         self,
@@ -339,7 +335,6 @@ def log_metric_values(
         ApiResponseFor201,
     ]: ...
 
-
     @typing.overload
     def log_metric_values(
         self,
@@ -413,7 +408,6 @@ def post(
         ApiResponseFor201,
     ]: ...
 
-
     @typing.overload
     def post(
         self,
@@ -456,5 +450,3 @@ def post(
             timeout=timeout,
             skip_deserialization=skip_deserialization
         )
-
-
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
index ddf16d50..ab5ede54 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
@@ -18,7 +18,6 @@ import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/__init__.py b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/__init__.py
index 9ae04b28..294da7e1 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/__init__.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/__init__.py
@@ -4,4 +4,4 @@
 
 from domino._impl.custommetrics.paths import PathValues
 
-path = PathValues.API_METRIC_VALUES_V1_MODEL_MONITORING_ID_METRIC
\ No newline at end of file
+path = PathValues.API_METRIC_VALUES_V1_MODEL_MONITORING_ID_METRIC
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
index deeb8422..ff21b1cc 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
@@ -18,7 +18,6 @@
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
@@ -126,9 +125,8 @@ class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
 
-
     class MetaOapg:
-        
+
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -144,7 +142,6 @@ def one_of(cls):
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
         *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
@@ -494,5 +491,3 @@ def get(
             timeout=timeout,
             skip_deserialization=skip_deserialization
         )
-
-
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
index 86630097..f191e511 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
@@ -18,7 +18,6 @@ import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
 
 import frozendict  # noqa: F401
diff --git a/domino/_impl/custommetrics/rest.py b/domino/_impl/custommetrics/rest.py
index aaea2bb3..0a711e2b 100644
--- a/domino/_impl/custommetrics/rest.py
+++ b/domino/_impl/custommetrics/rest.py
@@ -11,7 +11,6 @@
 
 import logging
 import ssl
-from urllib.parse import urlencode
 import typing
 
 import certifi
diff --git a/domino/_impl/custommetrics/schemas.py b/domino/_impl/custommetrics/schemas.py
index ac972643..a59c4929 100644
--- a/domino/_impl/custommetrics/schemas.py
+++ b/domino/_impl/custommetrics/schemas.py
@@ -38,6 +38,7 @@ class Unset(object):
     """
     pass
 
+
 unset = Unset()
 
 none_type = type(None)
@@ -228,7 +229,6 @@ class MetaOapgTyped:
     min_items: int
     discriminator: typing.Dict[str, typing.Dict[str, typing.Type['Schema']]]
 
-
     class properties:
         # to hold object properties
         pass
@@ -552,6 +552,7 @@ def __init__(
         """
         pass
 
+
 """
 import itertools
 data_types = ('None', 'FrozenDict', 'Tuple', 'Str', 'Decimal', 'Bool')
@@ -582,269 +583,394 @@ def __init__(
     BytesMixin = bytes
     FileMixin = FileIO
     # qty 2
+
     class BinaryMixin(bytes, FileIO):
         pass
+
     class NoneFrozenDictMixin(NoneClass, frozendict.frozendict):
         pass
+
     class NoneTupleMixin(NoneClass, tuple):
         pass
+
     class NoneStrMixin(NoneClass, str):
         pass
+
     class NoneDecimalMixin(NoneClass, decimal.Decimal):
         pass
+
     class NoneBoolMixin(NoneClass, BoolClass):
         pass
+
     class FrozenDictTupleMixin(frozendict.frozendict, tuple):
         pass
+
     class FrozenDictStrMixin(frozendict.frozendict, str):
         pass
+
     class FrozenDictDecimalMixin(frozendict.frozendict, decimal.Decimal):
         pass
+
     class FrozenDictBoolMixin(frozendict.frozendict, BoolClass):
         pass
+
     class TupleStrMixin(tuple, str):
         pass
+
     class TupleDecimalMixin(tuple, decimal.Decimal):
         pass
+
     class TupleBoolMixin(tuple, BoolClass):
         pass
+
     class StrDecimalMixin(str, decimal.Decimal):
         pass
+
     class StrBoolMixin(str, BoolClass):
         pass
+
     class DecimalBoolMixin(decimal.Decimal, BoolClass):
         pass
     # qty 3
+
     class NoneFrozenDictTupleMixin(NoneClass, frozendict.frozendict, tuple):
         pass
+
     class NoneFrozenDictStrMixin(NoneClass, frozendict.frozendict, str):
         pass
+
     class NoneFrozenDictDecimalMixin(NoneClass, frozendict.frozendict, decimal.Decimal):
         pass
+
     class NoneFrozenDictBoolMixin(NoneClass, frozendict.frozendict, BoolClass):
         pass
+
     class NoneTupleStrMixin(NoneClass, tuple, str):
         pass
+
     class NoneTupleDecimalMixin(NoneClass, tuple, decimal.Decimal):
         pass
+
     class NoneTupleBoolMixin(NoneClass, tuple, BoolClass):
         pass
+
     class NoneStrDecimalMixin(NoneClass, str, decimal.Decimal):
         pass
+
     class NoneStrBoolMixin(NoneClass, str, BoolClass):
         pass
+
     class NoneDecimalBoolMixin(NoneClass, decimal.Decimal, BoolClass):
         pass
+
     class FrozenDictTupleStrMixin(frozendict.frozendict, tuple, str):
         pass
+
     class FrozenDictTupleDecimalMixin(frozendict.frozendict, tuple, decimal.Decimal):
         pass
+
     class FrozenDictTupleBoolMixin(frozendict.frozendict, tuple, BoolClass):
         pass
+
     class FrozenDictStrDecimalMixin(frozendict.frozendict, str, decimal.Decimal):
         pass
+
     class FrozenDictStrBoolMixin(frozendict.frozendict, str, BoolClass):
         pass
+
     class FrozenDictDecimalBoolMixin(frozendict.frozendict, decimal.Decimal, BoolClass):
         pass
+
     class TupleStrDecimalMixin(tuple, str, decimal.Decimal):
         pass
+
     class TupleStrBoolMixin(tuple, str, BoolClass):
         pass
+
     class TupleDecimalBoolMixin(tuple, decimal.Decimal, BoolClass):
         pass
+
     class StrDecimalBoolMixin(str, decimal.Decimal, BoolClass):
         pass
     # qty 4
+
     class NoneFrozenDictTupleStrMixin(NoneClass, frozendict.frozendict, tuple, str):
         pass
+
     class NoneFrozenDictTupleDecimalMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal):
         pass
+
     class NoneFrozenDictTupleBoolMixin(NoneClass, frozendict.frozendict, tuple, BoolClass):
         pass
+
     class NoneFrozenDictStrDecimalMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal):
         pass
+
     class NoneFrozenDictStrBoolMixin(NoneClass, frozendict.frozendict, str, BoolClass):
         pass
+
     class NoneFrozenDictDecimalBoolMixin(NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass):
         pass
+
     class NoneTupleStrDecimalMixin(NoneClass, tuple, str, decimal.Decimal):
         pass
+
     class NoneTupleStrBoolMixin(NoneClass, tuple, str, BoolClass):
         pass
+
     class NoneTupleDecimalBoolMixin(NoneClass, tuple, decimal.Decimal, BoolClass):
         pass
+
     class NoneStrDecimalBoolMixin(NoneClass, str, decimal.Decimal, BoolClass):
         pass
+
     class FrozenDictTupleStrDecimalMixin(frozendict.frozendict, tuple, str, decimal.Decimal):
         pass
+
     class FrozenDictTupleStrBoolMixin(frozendict.frozendict, tuple, str, BoolClass):
         pass
+
     class FrozenDictTupleDecimalBoolMixin(frozendict.frozendict, tuple, decimal.Decimal, BoolClass):
         pass
+
     class FrozenDictStrDecimalBoolMixin(frozendict.frozendict, str, decimal.Decimal, BoolClass):
         pass
+
     class TupleStrDecimalBoolMixin(tuple, str, decimal.Decimal, BoolClass):
         pass
     # qty 5
+
     class NoneFrozenDictTupleStrDecimalMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal):
         pass
+
     class NoneFrozenDictTupleStrBoolMixin(NoneClass, frozendict.frozendict, tuple, str, BoolClass):
         pass
+
     class NoneFrozenDictTupleDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass):
         pass
+
     class NoneFrozenDictStrDecimalBoolMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass):
         pass
+
     class NoneTupleStrDecimalBoolMixin(NoneClass, tuple, str, decimal.Decimal, BoolClass):
         pass
+
     class FrozenDictTupleStrDecimalBoolMixin(frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass):
         pass
     # qty 6
+
     class NoneFrozenDictTupleStrDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass):
         pass
     # qty 8
+
     class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes):
         pass
 else:
     # qty 1
     class NoneMixin:
         _types = {NoneClass}
+
     class FrozenDictMixin:
         _types = {frozendict.frozendict}
+
     class TupleMixin:
         _types = {tuple}
+
     class StrMixin:
         _types = {str}
+
     class DecimalMixin:
         _types = {decimal.Decimal}
+
     class BoolMixin:
         _types = {BoolClass}
+
     class BytesMixin:
         _types = {bytes}
+
     class FileMixin:
         _types = {FileIO}
     # qty 2
+
     class BinaryMixin:
         _types = {bytes, FileIO}
+
     class NoneFrozenDictMixin:
         _types = {NoneClass, frozendict.frozendict}
+
     class NoneTupleMixin:
         _types = {NoneClass, tuple}
+
     class NoneStrMixin:
         _types = {NoneClass, str}
+
     class NoneDecimalMixin:
         _types = {NoneClass, decimal.Decimal}
+
     class NoneBoolMixin:
         _types = {NoneClass, BoolClass}
+
     class FrozenDictTupleMixin:
         _types = {frozendict.frozendict, tuple}
+
     class FrozenDictStrMixin:
         _types = {frozendict.frozendict, str}
+
     class FrozenDictDecimalMixin:
         _types = {frozendict.frozendict, decimal.Decimal}
+
     class FrozenDictBoolMixin:
         _types = {frozendict.frozendict, BoolClass}
+
     class TupleStrMixin:
         _types = {tuple, str}
+
     class TupleDecimalMixin:
         _types = {tuple, decimal.Decimal}
+
     class TupleBoolMixin:
         _types = {tuple, BoolClass}
+
     class StrDecimalMixin:
         _types = {str, decimal.Decimal}
+
     class StrBoolMixin:
         _types = {str, BoolClass}
+
     class DecimalBoolMixin:
         _types = {decimal.Decimal, BoolClass}
     # qty 3
+
     class NoneFrozenDictTupleMixin:
         _types = {NoneClass, frozendict.frozendict, tuple}
+
     class NoneFrozenDictStrMixin:
         _types = {NoneClass, frozendict.frozendict, str}
+
     class NoneFrozenDictDecimalMixin:
         _types = {NoneClass, frozendict.frozendict, decimal.Decimal}
+
     class NoneFrozenDictBoolMixin:
         _types = {NoneClass, frozendict.frozendict, BoolClass}
+
     class NoneTupleStrMixin:
         _types = {NoneClass, tuple, str}
+
     class NoneTupleDecimalMixin:
         _types = {NoneClass, tuple, decimal.Decimal}
+
     class NoneTupleBoolMixin:
         _types = {NoneClass, tuple, BoolClass}
+
     class NoneStrDecimalMixin:
         _types = {NoneClass, str, decimal.Decimal}
+
     class NoneStrBoolMixin:
         _types = {NoneClass, str, BoolClass}
+
     class NoneDecimalBoolMixin:
         _types = {NoneClass, decimal.Decimal, BoolClass}
+
     class FrozenDictTupleStrMixin:
         _types = {frozendict.frozendict, tuple, str}
+
     class FrozenDictTupleDecimalMixin:
         _types = {frozendict.frozendict, tuple, decimal.Decimal}
+
     class FrozenDictTupleBoolMixin:
         _types = {frozendict.frozendict, tuple, BoolClass}
+
     class FrozenDictStrDecimalMixin:
         _types = {frozendict.frozendict, str, decimal.Decimal}
+
     class FrozenDictStrBoolMixin:
         _types = {frozendict.frozendict, str, BoolClass}
+
     class FrozenDictDecimalBoolMixin:
         _types = {frozendict.frozendict, decimal.Decimal, BoolClass}
+
     class TupleStrDecimalMixin:
         _types = {tuple, str, decimal.Decimal}
+
     class TupleStrBoolMixin:
         _types = {tuple, str, BoolClass}
+
     class TupleDecimalBoolMixin:
         _types = {tuple, decimal.Decimal, BoolClass}
+
     class StrDecimalBoolMixin:
         _types = {str, decimal.Decimal, BoolClass}
     # qty 4
+
     class NoneFrozenDictTupleStrMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, str}
+
     class NoneFrozenDictTupleDecimalMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal}
+
     class NoneFrozenDictTupleBoolMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, BoolClass}
+
     class NoneFrozenDictStrDecimalMixin:
         _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal}
+
     class NoneFrozenDictStrBoolMixin:
         _types = {NoneClass, frozendict.frozendict, str, BoolClass}
+
     class NoneFrozenDictDecimalBoolMixin:
         _types = {NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass}
+
     class NoneTupleStrDecimalMixin:
         _types = {NoneClass, tuple, str, decimal.Decimal}
+
     class NoneTupleStrBoolMixin:
         _types = {NoneClass, tuple, str, BoolClass}
+
     class NoneTupleDecimalBoolMixin:
         _types = {NoneClass, tuple, decimal.Decimal, BoolClass}
+
     class NoneStrDecimalBoolMixin:
         _types = {NoneClass, str, decimal.Decimal, BoolClass}
+
     class FrozenDictTupleStrDecimalMixin:
         _types = {frozendict.frozendict, tuple, str, decimal.Decimal}
+
     class FrozenDictTupleStrBoolMixin:
         _types = {frozendict.frozendict, tuple, str, BoolClass}
+
     class FrozenDictTupleDecimalBoolMixin:
         _types = {frozendict.frozendict, tuple, decimal.Decimal, BoolClass}
+
     class FrozenDictStrDecimalBoolMixin:
         _types = {frozendict.frozendict, str, decimal.Decimal, BoolClass}
+
     class TupleStrDecimalBoolMixin:
         _types = {tuple, str, decimal.Decimal, BoolClass}
     # qty 5
+
     class NoneFrozenDictTupleStrDecimalMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal}
+
     class NoneFrozenDictTupleStrBoolMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, str, BoolClass}
+
     class NoneFrozenDictTupleDecimalBoolMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass}
+
     class NoneFrozenDictStrDecimalBoolMixin:
         _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass}
+
     class NoneTupleStrDecimalBoolMixin:
         _types = {NoneClass, tuple, str, decimal.Decimal, BoolClass}
+
     class FrozenDictTupleStrDecimalBoolMixin:
         _types = {frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass}
     # qty 6
+
     class NoneFrozenDictTupleStrDecimalBoolMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass}
     # qty 8
+
     class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin:
         _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes}
 
@@ -864,8 +990,8 @@ def _is_json_validation_enabled_oapg(schema_keyword, configuration=None):
         """
 
         return (configuration is None or
-            not hasattr(configuration, '_disabled_client_side_validations') or
-            schema_keyword not in configuration._disabled_client_side_validations)
+                not hasattr(configuration, '_disabled_client_side_validations') or
+                schema_keyword not in configuration._disabled_client_side_validations)
 
     @staticmethod
     def _raise_validation_errror_message_oapg(value, constraint_msg, constraint_value, path_to_item, additional_txt=""):
@@ -1224,7 +1350,7 @@ def __check_numeric_validations(
         if not hasattr(cls, 'MetaOapg'):
             return
         if cls._is_json_validation_enabled_oapg('multipleOf',
-                                      validation_metadata.configuration) and hasattr(cls.MetaOapg, 'multiple_of'):
+                                                validation_metadata.configuration) and hasattr(cls.MetaOapg, 'multiple_of'):
             multiple_of_value = cls.MetaOapg.multiple_of
             if (not (float(arg) / multiple_of_value).is_integer()):
                 # Note 'multipleOf' will be as good as the floating point arithmetic.
@@ -2248,6 +2374,7 @@ def _validate_oapg(
         cls.__validate_format(arg, validation_metadata=validation_metadata)
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
+
 class Float64Schema(
     Float64Base,
     NumberSchema
diff --git a/domino/agents/logging/dominorun.py b/domino/agents/logging/dominorun.py
index 0d85d32a..b61af1a9 100644
--- a/domino/agents/logging/dominorun.py
+++ b/domino/agents/logging/dominorun.py
@@ -110,6 +110,7 @@ def _choose_summarizer(statistic: SummaryStatistic) -> Callable[[list[float]], f
         case _:
             raise ValueError(f"Unknown summary statistic: {statistic}")
 
+
 class DominoRun:
     _is_agent_context = False
 
diff --git a/domino/agents/logging/logging.py b/domino/agents/logging/logging.py
index 4f8da7c4..4738635d 100644
--- a/domino/agents/logging/logging.py
+++ b/domino/agents/logging/logging.py
@@ -16,10 +16,10 @@ def add_domino_tags(trace_id: str):
 
 
 def log_evaluation(
-        trace_id: str,
-        name: str,
-        value: float | str,
-    ):
+    trace_id: str,
+    name: str,
+    value: float | str,
+):
     """This logs evaluation data and metadata to a parent trace. This is used to log the evaluation of a span
     after it was created. This is useful for analyzing past performance of an Agent component.
 
diff --git a/domino/agents/read_agent_config.py b/domino/agents/read_agent_config.py
index 6a9aee3d..2fbb2915 100644
--- a/domino/agents/read_agent_config.py
+++ b/domino/agents/read_agent_config.py
@@ -40,10 +40,10 @@ def read_agent_config(path: Optional[str] = None) -> dict:
     path = path or _get_agent_config_path()
     params = {}
     try:
-            with open(path, 'r') as f:
-                params = yaml.safe_load(f)
+        with open(path, 'r') as f:
+            params = yaml.safe_load(f)
     except Exception as e:
-            logging.warning(f"Failed to read agent config yaml at path {path}: {e}")
+        logging.warning(f"Failed to read agent config yaml at path {path}: {e}")
 
     return params
 
diff --git a/domino/agents/tracing/tracing.py b/domino/agents/tracing/tracing.py
index 2b697c12..a3c5faba 100644
--- a/domino/agents/tracing/tracing.py
+++ b/domino/agents/tracing/tracing.py
@@ -9,7 +9,7 @@
 from uuid import uuid4
 
 from .._client import client
-from .inittracing import init_tracing, triggered_autolog_frameworks, call_autolog
+from .inittracing import init_tracing
 from ..logging.logging import log_evaluation
 from ._util import get_is_production, build_agent_experiment_name
 from .._eval_tags import validate_label, get_eval_tag_name
@@ -22,6 +22,7 @@
 
 DOMINO_NO_RESULT_ADD_TRACING = "domino_no_result"
 
+
 @dataclass
 class SpanSummary:
     """A span in a trace."""
@@ -41,6 +42,7 @@ class SpanSummary:
     outputs: Any
     """The outputs of the function that created the span"""
 
+
 @dataclass
 class EvaluationResult:
     """An evaluation result for a trace."""
@@ -154,6 +156,7 @@ def _do_evaluation(
 
     return None
 
+
 def _log_eval_results(
     parent_span: mlflow.entities.Span,
     evaluator: Optional[SpanEvaluator],
@@ -246,7 +249,6 @@ def ask_chat_bot(user_input: str) -> dict:
     """
     validate_label(name)
 
-
     def decorator(func):
         # For Regular Functions (e.g., langgraph_agents.run_agent)
         @functools.wraps(func)
@@ -533,10 +535,12 @@ def _search_traces(
 
     return SearchTracesResponse(trace_summaries, next_page_token)
 
+
 def _return_traced_result(result: any):
     if result != DOMINO_NO_RESULT_ADD_TRACING:
         return result
     else:
         logger.warning("No result returned from traced function")
 
+
 logger = logging.getLogger(__name__)
diff --git a/domino/authentication.py b/domino/authentication.py
index 34b96495..376c79c0 100644
--- a/domino/authentication.py
+++ b/domino/authentication.py
@@ -38,7 +38,6 @@ def _replaceHostWithProxy(self, url):
         return re.sub('^.*?://[^/]+', self.api_proxy, url)
 
 
-
 class ApiKeyAuth(AuthBase):
     """
     Class for authenticating requests using a Domino API key header.
diff --git a/domino/datasets.py b/domino/datasets.py
index 6a2f31c8..52fe6542 100644
--- a/domino/datasets.py
+++ b/domino/datasets.py
@@ -53,7 +53,7 @@ def __init__(
     ):
         cleaned_relative_local_path = os.path.relpath(os.path.normpath(local_path_to_file_or_directory), start=os.curdir)
         # in case running on windows
-        cleaned_relative_local_path = self._get_unix_style_path(cleaned_relative_local_path)        
+        cleaned_relative_local_path = self._get_unix_style_path(cleaned_relative_local_path)
 
         self.csrf_no_check_header = csrf_no_check_header
         self.dataset_id = dataset_id
@@ -133,7 +133,7 @@ def _create_chunk_queue(self) -> list[UploadChunk]:
 
                 # in case running on windows
                 cleaned_relative_path = self._get_unix_style_path(relative_path_to_file)
-                
+
                 # append chunk to queue
                 chunk_q.extend(self._create_chunks(cleaned_relative_path))
         return chunk_q
diff --git a/domino/domino.py b/domino/domino.py
index 5ea84769..ce219934 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -29,7 +29,6 @@
 from domino._custom_metrics import _CustomMetricsClientBase, _CustomMetricsClientGen, _CustomMetricsClient
 
 
-
 class Domino:
     def __init__(
         self, project, api_key=None, host=None, domino_token_file=None, auth_token=None, api_proxy=None,
@@ -693,7 +692,6 @@ def jobs_list(self,
         url = self._routes.jobs_list(project_id, order_by, sort_by, page_size, page_no, show_archived, status, tag)
         return self._get(url)
 
-
     def job_status(self, job_id: str) -> dict:
         """
         Gets the status of job with given job_id
@@ -703,10 +701,10 @@ def job_status(self, job_id: str) -> dict:
         return self.request_manager.get(self._routes.job_status(job_id)).json()
 
     def job_restart(
-            self,
-            job_id:str,
-            should_use_original_input_commit: bool = True
-        ):
+        self,
+        job_id: str,
+        should_use_original_input_commit: bool = True
+    ):
         """
         Restarts a previous job
         :param job_id:                              ID of the original job that should be restarted
@@ -1115,7 +1113,6 @@ def archive_environment(self, environment_id: str) -> None:
         url = self._routes.environment_get(environment_id)
         self.request_manager.delete(url)
 
-
     def create_environment(
             self,
             name: str,
@@ -1140,7 +1137,7 @@ def create_environment(
     ) -> dict:
         """
         Create a new Domino compute environment.
-        
+
         Args:
             name: Name of the compute environment
             visibility: Visibility level ("Private" or "Global")
@@ -1161,7 +1158,7 @@ def create_environment(
             description: Detailed description
             is_restricted: Whether the environment is restricted
             organization_owner_id: ID of an organization that will own the environment
-            
+
         Returns:
             dict: Created environment details
         """
@@ -1211,27 +1208,26 @@ def create_environment(
         response = self.request_manager.post(url, data=payload, headers={"Content-Type": "application/json"})
         return response.json()
 
-
     def create_environment_revision(
-            self,
-            environment_id: str,
-            dockerfile_instructions: str = "",
-            environment_variables: Optional[List[Dict[str, Any]]] = None,
-            base_image: Optional[str] = None,
-            post_run_script: str = "",
-            post_setup_script: str = "",
-            pre_run_script: str = "",
-            pre_setup_script: str = "",
-            skip_cache: bool = False,
-            summary: str = "",
-            supported_clusters: Optional[List[str]] = None,
-            tags: Optional[List[str]] = None,
-            use_vpn: bool = False,
-            workspace_tools: Optional[List[Dict[str, Any]]] = None,
-        ) -> dict:
+                self,
+                environment_id: str,
+                dockerfile_instructions: str = "",
+                environment_variables: Optional[List[Dict[str, Any]]] = None,
+                base_image: Optional[str] = None,
+                post_run_script: str = "",
+                post_setup_script: str = "",
+                pre_run_script: str = "",
+                pre_setup_script: str = "",
+                skip_cache: bool = False,
+                summary: str = "",
+                supported_clusters: Optional[List[str]] = None,
+                tags: Optional[List[str]] = None,
+                use_vpn: bool = False,
+                workspace_tools: Optional[List[Dict[str, Any]]] = None,
+            ) -> dict:
         """
         Create a new revision of an existing Domino environment.
-        
+
         Args:
             environment_id: ID of the environment for which to create a revision
             dockerfile_instructions: Dockerfile instructions to customize the environment
@@ -1247,7 +1243,7 @@ def create_environment_revision(
             tags: List of tags for the environment
             use_vpn: Whether to use VPN for this environment
             workspace_tools: List of workspace tools configuration
-            
+
         Returns:
             dict: Response content from the API
         """
@@ -1282,12 +1278,11 @@ def create_environment_revision(
             "workspaceTools": workspace_tools
         }
 
-        url=self._routes.revision_create(environment_id)
-        payload=json.dumps(data)
+        url = self._routes.revision_create(environment_id)
+        payload = json.dumps(data)
         response = self.request_manager.post(url, data=payload, headers={"Content-Type": "application/json"})
         return response.json()
 
-
     def restrict_environment_revision(
             self,
             environment_id: str,
@@ -1301,12 +1296,12 @@ def restrict_environment_revision(
             "isRestricted": True
         }
 
-        url=self._routes.revision_patch(environment_id, revision_id)
-        payload=json.dumps(data)
+        url = self._routes.revision_patch(environment_id, revision_id)
+        payload = json.dumps(data)
         self.request_manager.patch(url, data=payload, headers={"Content-Type": "application/json"})
 
-
     # Model Manager functions
+
     def models_list(self):
         url = self._routes.models_list()
         return self._get(url)
diff --git a/domino/exceptions.py b/domino/exceptions.py
index 30955696..6a2cbaa8 100644
--- a/domino/exceptions.py
+++ b/domino/exceptions.py
@@ -75,11 +75,13 @@ class UnsupportedFieldException(DominoException):
 
     pass
 
+
 class UnsupportedOperationException(DominoException):
     """Unsupported operation Exception"""
 
     pass
 
+
 class MalformedInputException(DominoException):
     """Malformed input Exception"""
 
diff --git a/domino/http_request_manager.py b/domino/http_request_manager.py
index 658be372..96d31cd7 100644
--- a/domino/http_request_manager.py
+++ b/domino/http_request_manager.py
@@ -7,17 +7,18 @@
 from requests.adapters import HTTPAdapter, Retry
 from requests.auth import AuthBase
 
-from ._version import __version__
 from .constants import DOMINO_VERIFY_CERTIFICATE
 from .exceptions import ReloginRequiredException
 
 
 R_SESSION_MAX_RETRIES = 4
 
+
 class _SessionInitializer:
     def __initialize__(self, session):
         raise NotImplementedError('Session initializers must be callable.')
 
+
 class _HttpRequestManager:
     """
     This class is responsible for
diff --git a/domino/routes.py b/domino/routes.py
index 610650fc..ed20e1a2 100644
--- a/domino/routes.py
+++ b/domino/routes.py
@@ -185,8 +185,8 @@ def environment_create(self):
     def environment_get(self, environment_id):
         return self._build_v1_environments_url() + f"/{environment_id}"
 
-
     # # Environment Revision URLs
+
     def revision_get(self, revision_id):
         return self._build_v4_environments_url() + f"/environmentRevision/{revision_id}"
 
@@ -196,8 +196,8 @@ def revision_create(self, environment_id):
     def revision_patch(self, environment_id, revision_id):
         return self._build_beta_environments_url() + f"/{environment_id}/revisions/{revision_id}"
 
-
     # Deployment URLs
+
     def deployment_version(self):
         return self.host + "/version"
 
@@ -331,7 +331,7 @@ def metric_alerts(self):
         return self.host + "/api/metricAlerts/v1"
 
     def log_metrics(self):
-        return self.host + f"/api/metricValues/v1"
+        return self.host + "/api/metricValues/v1"
 
     def read_metrics(self, model_monitoring_id, metric):
         return self.host + f"/api/metricValues/v1/{model_monitoring_id}/{metric}"
diff --git a/examples/example_budget_manager.py b/examples/example_budget_manager.py
index 5c8f12d3..878324c4 100644
--- a/examples/example_budget_manager.py
+++ b/examples/example_budget_manager.py
@@ -142,7 +142,7 @@ def get_uuid() -> str:
 pprint(projects_bt_04)
 
 # update projects' billing tags in bulk
-projects_tags = {bt_project["id"]: "BTExample06", project["id"]:  "BTExample06", domino.project_id: "BTExample04"}
+projects_tags = {bt_project["id"]: "BTExample06", project["id"]: "BTExample06", domino.project_id: "BTExample04"}
 domino.project_billing_tag_bulk_update(projects_tags)
 
 # query project by billing tag
diff --git a/setup.py b/setup.py
index 4a494a04..07db0ddc 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 from setuptools import find_packages, setup
 
 if sys.version_info < (3, 10):
-    message = f"dominodatalab requires Python '>=3.10.0' but the running Python is {'.'.join(map(str,sys.version_info[:3]))}"
+    message = f"dominodatalab requires Python '>=3.10.0' but the running Python is {'.'.join(map(str, sys.version_info[:3]))}"
     message += "\nConsider Checking python-domino and domino compatibility"
     sys.exit(message)
 
@@ -71,8 +71,8 @@ def get_version():
             "pytest-order>=1.3.0",
             "pytest-asyncio>=0.23.8",
             "scikit-learn>=1.6.1",  # used in agent tracing tests
-            "openai>=2.7.2", # used in agent tracing tests
-            "ai-mock>=0.3.1", # used in agent tracing tests
+            "openai>=2.7.2",  # used in agent tracing tests
+            "ai-mock>=0.3.1",  # used in agent tracing tests
             "black==22.3.0",
             "flake8==4.0.1",
             "Jinja2==2.11.3",
@@ -90,7 +90,7 @@ def get_version():
         ],
         "docs": [
             "sphinx>=7.4.0",
-            "markupsafe==2.0.1", # added for using Jinja2 with sphinx and python 3.10
+            "markupsafe==2.0.1",  # added for using Jinja2 with sphinx and python 3.10
         ]
     },
 )
diff --git a/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
index 38011b0f..3e8107c4 100644
--- a/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
index 940e884c..a0dd2243 100644
--- a/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_metadata_v1.py b/tests/_impl/custommetrics/test_models/test_metadata_v1.py
index 58a97394..17464f6e 100644
--- a/tests/_impl/custommetrics/test_models/test_metadata_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metadata_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.metadata_v1 import MetadataV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py b/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
index 3c00a903..68229e7c 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.metric_alert_request_v1 import MetricAlertRequestV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py b/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
index f43a30c4..77d7f18a 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.metric_tag_v1 import MetricTagV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_metric_value_v1.py b/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
index 8fecdb3b..f8290b83 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.metric_value_v1 import MetricValueV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
index d8875b61..8f6129d2 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py b/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
index 9839222b..db257d67 100644
--- a/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.new_metric_value_v1 import NewMetricValueV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
index 1d34dffc..283cd223 100644
--- a/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import NewMetricValuesEnvelopeV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_models/test_target_range_v1.py b/tests/_impl/custommetrics/test_models/test_target_range_v1.py
index 089bec92..68f8cc9b 100644
--- a/tests/_impl/custommetrics/test_models/test_target_range_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_target_range_v1.py
@@ -11,8 +11,6 @@
 
 import unittest
 
-import domino._impl.custommetrics
-from domino._impl.custommetrics.model.target_range_v1 import TargetRangeV1
 from domino._impl.custommetrics import configuration
 
 
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py b/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
index 69a33ff5..10c57bb0 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
@@ -7,13 +7,10 @@
 """
 
 import unittest
-from unittest.mock import patch
 
-import urllib3
 
-import domino._impl.custommetrics
 from domino._impl.custommetrics.paths.api_metric_alerts_v1 import post  # noqa: E501
-from domino._impl.custommetrics import configuration, schemas, api_client
+from domino._impl.custommetrics import configuration, api_client
 
 from .. import ApiTestMixin
 
@@ -36,7 +33,5 @@ def tearDown(self):
     response_body = ''
 
 
-
-
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
index ed2b898a..bb1871f0 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
@@ -7,13 +7,10 @@
 """
 
 import unittest
-from unittest.mock import patch
 
-import urllib3
 
-import domino._impl.custommetrics
 from domino._impl.custommetrics.paths.api_metric_values_v1 import post  # noqa: E501
-from domino._impl.custommetrics import configuration, schemas, api_client
+from domino._impl.custommetrics import configuration, api_client
 
 from .. import ApiTestMixin
 
@@ -36,7 +33,5 @@ def tearDown(self):
     response_body = ''
 
 
-
-
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
index dfab2ce8..c6018acd 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
@@ -7,13 +7,10 @@
 """
 
 import unittest
-from unittest.mock import patch
 
-import urllib3
 
-import domino._impl.custommetrics
 from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric import get  # noqa: E501
-from domino._impl.custommetrics import configuration, schemas, api_client
+from domino._impl.custommetrics import configuration, api_client
 
 from .. import ApiTestMixin
 
@@ -35,7 +32,5 @@ def tearDown(self):
     response_status = 200
 
 
-
-
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/agents/test_agents_eval_tags.py b/tests/agents/test_agents_eval_tags.py
index 43ef631f..744526a8 100644
--- a/tests/agents/test_agents_eval_tags.py
+++ b/tests/agents/test_agents_eval_tags.py
@@ -1,5 +1,6 @@
 from domino.agents._eval_tags import build_eval_result_tag
 
+
 def test_build_eval_result_tags():
-        assert build_eval_result_tag('my_metric', '1') ==  'domino.prog.metric.my_metric', 'numbers should be metrics'
-        assert build_eval_result_tag('my_label', 'cat') ==  'domino.prog.label.my_label', 'strings should be labels'
+    assert build_eval_result_tag('my_metric', '1') == 'domino.prog.metric.my_metric', 'numbers should be metrics'
+    assert build_eval_result_tag('my_label', 'cat') == 'domino.prog.label.my_label', 'strings should be labels'
diff --git a/tests/agents/test_read_agent_config.py b/tests/agents/test_read_agent_config.py
index 67c051cf..4bfb91f5 100644
--- a/tests/agents/test_read_agent_config.py
+++ b/tests/agents/test_read_agent_config.py
@@ -5,38 +5,41 @@
 from domino.agents.read_agent_config import flatten_dict
 from ..conftest import TEST_AGENTS_ENV_VARS
 
+
 def test_read_agent_config_path_from_env_var():
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
-                config_values = read_agent_config()
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
+        config_values = read_agent_config()
+
+        assert config_values['version'] == 1.0
+        assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
+        assert config_values['chat_assistant']['temperature'] == 0.7
+        assert config_values['chat_assistant']['max_tokens'] == 1500
 
-                assert config_values['version'] == 1.0
-                assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
-                assert config_values['chat_assistant']['temperature'] == 0.7
-                assert config_values['chat_assistant']['max_tokens'] == 1500
 
 def test_read_agent_config_path_from_override_arg():
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_CONFIG_PATH": "broken_path"}, clear=True):
-                config_values = read_agent_config("tests/assets/agent_config.yaml")
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_CONFIG_PATH": "broken_path"}, clear=True):
+        config_values = read_agent_config("tests/assets/agent_config.yaml")
+
+        assert config_values['version'] == 1.0
+        assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
+        assert config_values['chat_assistant']['temperature'] == 0.7
+        assert config_values['chat_assistant']['max_tokens'] == 1500
 
-                assert config_values['version'] == 1.0
-                assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
-                assert config_values['chat_assistant']['temperature'] == 0.7
-                assert config_values['chat_assistant']['max_tokens'] == 1500
 
 def test_flatten_dict():
-        nested_dict = {
-                'a': 1,
-                'b': {
-                        'c': 2,
-                        'd': { 'e': 3 }
-                },
-                'f': 4
-        }
-        flat_dict = flatten_dict(nested_dict)
-        expected_flat_dict = {
-                'a': 1,
-                'b.c': 2,
-                'b.d.e': 3,
-                'f': 4
-        }
-        assert flat_dict == expected_flat_dict
+    nested_dict = {
+            'a': 1,
+            'b': {
+                    'c': 2,
+                    'd': {'e': 3}
+            },
+            'f': 4
+    }
+    flat_dict = flatten_dict(nested_dict)
+    expected_flat_dict = {
+            'a': 1,
+            'b.c': 2,
+            'b.d.e': 3,
+            'f': 4
+    }
+    assert flat_dict == expected_flat_dict
diff --git a/tests/agents/test_verify_domino_support.py b/tests/agents/test_verify_domino_support.py
index 72d518d2..720010b2 100644
--- a/tests/agents/test_verify_domino_support.py
+++ b/tests/agents/test_verify_domino_support.py
@@ -8,75 +8,82 @@
 from domino.exceptions import UnsupportedOperationException
 from ..conftest import TEST_AGENTS_ENV_VARS
 
+
 def test_get_version_endpoint():
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True):
-                assert _get_version_endpoint() == "http://localhost:1111/version"
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True):
+        assert _get_version_endpoint() == "http://localhost:1111/version"
+
 
 def test_verify_domino_support_when_get_domino_version_fails(caplog):
-        """
-        If we fail to get the domino version, we shouldn't fail everything, since this may be due to network error
-        and likely if they are on the wrong domino version, the mlflow-proxy won't support new code anyway.
-        """
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
-                patch('domino.agents._verify_domino_support._get_domino_version', side_effect=RuntimeError("test_verify_domino_support_when_get_domino_version_fails")), \
-                patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version, \
-                caplog.at_level(logging.DEBUG):
-
-                from domino.agents._verify_domino_support import _verify_domino_support_impl
-                mock_get_mlflow_version.return_value = MIN_MLFLOW_VERSION
-
-                # Should not raise and should pass
-                _verify_domino_support_impl()
-                assert "Failed to get Domino version. Will continue without version info: test_verify_domino_support_when_get_domino_version_fails" in caplog.text
+    """
+    If we fail to get the domino version, we shouldn't fail everything, since this may be due to network error
+    and likely if they are on the wrong domino version, the mlflow-proxy won't support new code anyway.
+    """
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
+            patch('domino.agents._verify_domino_support._get_domino_version', side_effect=RuntimeError("test_verify_domino_support_when_get_domino_version_fails")), \
+            patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version, \
+            caplog.at_level(logging.DEBUG):
 
-def test_verify_domino_support_domino_and_mlflow_correct_version(verify_domino_support_fixture):
         from domino.agents._verify_domino_support import _verify_domino_support_impl
-        verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-        verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
+        mock_get_mlflow_version.return_value = MIN_MLFLOW_VERSION
 
-        # Should not raise
+        # Should not raise and should pass
         _verify_domino_support_impl()
+        assert "Failed to get Domino version. Will continue without version info: test_verify_domino_support_when_get_domino_version_fails" in caplog.text
+
+
+def test_verify_domino_support_domino_and_mlflow_correct_version(verify_domino_support_fixture):
+    from domino.agents._verify_domino_support import _verify_domino_support_impl
+    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
+    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
+
+    # Should not raise
+    _verify_domino_support_impl()
+
 
 @pytest.mark.order(1)
 def test_verify_domino_support_should_be_idempotent(verify_domino_support_fixture, mocker):
-        """
-        This test must run first, because if verifies global functionality, which is incidentally exercised
-        by other tests.
-        """
-        from domino.agents._verify_domino_support import verify_domino_support
-        verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-        verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
+    """
+    This test must run first, because it verifies global functionality, which is incidentally exercised
+    by other tests.
+    """
+    from domino.agents._verify_domino_support import verify_domino_support
+    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
+    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
 
-        import domino.agents._verify_domino_support
-        get_domino_version_spy = mocker.spy(domino.agents._verify_domino_support, "_get_domino_version")
+    import domino.agents._verify_domino_support
+    get_domino_version_spy = mocker.spy(domino.agents._verify_domino_support, "_get_domino_version")
 
-        verify_domino_support()
-        verify_domino_support()
+    verify_domino_support()
+    verify_domino_support()
+
+    assert get_domino_version_spy.call_count == 1
 
-        assert get_domino_version_spy.call_count == 1
 
 def test_verify_domino_support_domino_wrong_version(verify_domino_support_fixture):
-        from domino.agents._verify_domino_support import _verify_domino_support_impl
-        verify_domino_support_fixture['mock_get_domino_version'].return_value = "6.1.2"
+    from domino.agents._verify_domino_support import _verify_domino_support_impl
+    verify_domino_support_fixture['mock_get_domino_version'].return_value = "6.1.2"
+
+    with pytest.raises(UnsupportedOperationException) as exn:
+        _verify_domino_support_impl()
 
-        with pytest.raises(UnsupportedOperationException) as exn:
-                _verify_domino_support_impl()
+    assert str(exn.value) == "This version of Domino doesn’t support the agents package."
 
-        assert str(exn.value) == "This version of Domino doesn’t support the agents package."
 
 def test_verify_domino_support_mlflow_wrong_version(verify_domino_support_fixture):
-        from domino.agents._verify_domino_support import _verify_domino_support_impl
-        verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-        verify_domino_support_fixture['mock_get_mlflow_version'].return_value = '3.1.0'
+    from domino.agents._verify_domino_support import _verify_domino_support_impl
+    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
+    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = '3.1.0'
+
+    with pytest.raises(UnsupportedOperationException) as exn:
+        _verify_domino_support_impl()
 
-        with pytest.raises(UnsupportedOperationException) as exn:
-                _verify_domino_support_impl()
+    assert str(exn.value) == f"This code requires you to install mlflow>={MIN_MLFLOW_VERSION}"
 
-        assert str(exn.value) == f"This code requires you to install mlflow>={MIN_MLFLOW_VERSION}"
 
 @pytest.fixture
 def verify_domino_support_fixture():
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
-                patch('domino.agents._verify_domino_support._get_domino_version') as mock_get_domino_version, \
-                patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version:
-                yield { 'mock_get_domino_version': mock_get_domino_version, 'mock_get_mlflow_version': mock_get_mlflow_version }
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
+            patch('domino.agents._verify_domino_support._get_domino_version') as mock_get_domino_version, \
+            patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version:
+        yield {'mock_get_domino_version': mock_get_domino_version, 'mock_get_mlflow_version': mock_get_mlflow_version}
diff --git a/tests/conftest.py b/tests/conftest.py
index 2622a6e4..b593fc64 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -28,6 +28,7 @@
     "version": "9.9.9",
 }
 
+
 @pytest.fixture
 def dummy_hostname():
     return "http://domino.somefakecompany.com"
@@ -129,7 +130,6 @@ def mock_proxy_response():
         yield response
 
 
-
 @pytest.fixture
 def mock_domino_version_response():
     """
@@ -197,6 +197,7 @@ def __init__(self, *args, **kwargs):
             self.header = None
     return TestAuth()
 
+
 @pytest.fixture(scope="session")
 def docker_client():
     return docker.from_env()
diff --git a/tests/integration/agents/conftest.py b/tests/integration/agents/conftest.py
index 95847700..e38568d1 100644
--- a/tests/integration/agents/conftest.py
+++ b/tests/integration/agents/conftest.py
@@ -1,96 +1,100 @@
 import logging as logger
 import os
-from docker.types import Mount
 import polling2
 import pytest
 import shutil
 from unittest.mock import patch
 import subprocess
-import sys
 
 from ...conftest import TEST_AGENTS_ENV_VARS
 from domino.agents._constants import MIN_MLFLOW_VERSION
-from .test_util import reset_prod_tracing
+
 
 @pytest.fixture
 def tracing():
-        pytest.importorskip("mlflow")
-        import domino.agents.tracing as tracing
-        yield tracing
+    pytest.importorskip("mlflow")
+    import domino.agents.tracing as tracing
+    yield tracing
+
 
 @pytest.fixture
 def logging():
-        pytest.importorskip("mlflow")
-        import domino.agents.logging as logging
-        yield logging
+    pytest.importorskip("mlflow")
+    import domino.agents.logging as logging
+    yield logging
+
 
 @pytest.fixture
 def mlflow():
-        pytest.importorskip("mlflow")
-        import mlflow
-        yield mlflow
+    pytest.importorskip("mlflow")
+    import mlflow
+    yield mlflow
+
 
 def _remove_mlruns_data():
-        try:
-                shutil.rmtree('./mlruns')
-        except Exception as e:
-                logger.warning(f"Failed to remove mlfruns directory during test cleanup: {e}")
+    try:
+        shutil.rmtree('./mlruns')
+    except Exception as e:
+        logger.warning(f"Failed to remove mlruns directory during test cleanup: {e}")
+
 
 @pytest.fixture(scope="package")
 def setup_openai_mock_server():
-        server_command = ['pipenv', 'run', 'ai-mock', 'server']
-        server_process = subprocess.Popen(server_command)
-        yield
-        server_process.kill()
+    server_command = ['pipenv', 'run', 'ai-mock', 'server']
+    server_process = subprocess.Popen(server_command)
+    yield
+    server_process.kill()
+
 
 @pytest.fixture(scope="package")
 def setup_mlflow_tracking_server_no_env_var_mock(docker_client):
-        pytest.importorskip("mlflow")
-        from mlflow import MlflowClient
-
-        with patch("domino.agents._verify_domino_support.verify_domino_support", clear=True) as mock_verify_domino_support:
-                mock_verify_domino_support.return_value = None
-                container_name = "test_mlflow_tracking_server"
-                docker_client.containers.run(
-                        f"ghcr.io/mlflow/mlflow:v{MIN_MLFLOW_VERSION}",
-                        detach=True,
-                        name=container_name,
-                        ports={5000:5000},
-                        command="mlflow ui --host 0.0.0.0 --serve-artifacts",
-                )
-
-                try:
-                        live_container = polling2.poll(
-                                target=lambda: docker_client.containers.get(container_name),
-                                check_success=lambda container: container.status == 'running',
-                                timeout=10,
-                                step=2,
-                                ignore_exceptions=True,
-                        )
-
-                        # verify api is reachable
-                        client = MlflowClient()
-                        experiments = polling2.poll(
-                                target=lambda: client.search_experiments(),
-                                check_success=lambda exp: True,
-                                timeout=10,
-                                step=2,
-                                ignore_exceptions=True,
-                        )
-
-                        yield live_container
-                        live_container.remove(force=True)
-                        _remove_mlruns_data()
-                except Exception as e:
-                        live_container = docker_client.containers.get(container_name)
-                        container_status = live_container.status
-                        logger.error(f'Mlflow tracking server did not get to running state. status: {container_status}')
-                        logger.info(live_container.logs())
-                        live_container.remove(force=True)
-                        _remove_mlruns_data()
-                        raise e
+    pytest.importorskip("mlflow")
+    from mlflow import MlflowClient
+
+    with patch("domino.agents._verify_domino_support.verify_domino_support", clear=True) as mock_verify_domino_support:
+        mock_verify_domino_support.return_value = None
+        container_name = "test_mlflow_tracking_server"
+        docker_client.containers.run(
+                f"ghcr.io/mlflow/mlflow:v{MIN_MLFLOW_VERSION}",
+                detach=True,
+                name=container_name,
+                ports={5000: 5000},
+                command="mlflow ui --host 0.0.0.0 --serve-artifacts",
+        )
+
+        try:
+            live_container = polling2.poll(
+                    target=lambda: docker_client.containers.get(container_name),
+                    check_success=lambda container: container.status == 'running',
+                    timeout=10,
+                    step=2,
+                    ignore_exceptions=True,
+            )
+
+            # verify api is reachable
+            client = MlflowClient()
+            experiments = polling2.poll(
+                    target=lambda: client.search_experiments(),
+                    check_success=lambda exp: True,
+                    timeout=10,
+                    step=2,
+                    ignore_exceptions=True,
+            )
+
+            yield live_container
+            live_container.remove(force=True)
+            _remove_mlruns_data()
+        except Exception as e:
+            live_container = docker_client.containers.get(container_name)
+            container_status = live_container.status
+            logger.error(f'Mlflow tracking server did not get to running state. status: {container_status}')
+            logger.info(live_container.logs())
+            live_container.remove(force=True)
+            _remove_mlruns_data()
+            raise e
+
 
 @pytest.fixture
 def setup_mlflow_tracking_server(setup_mlflow_tracking_server_no_env_var_mock, docker_client):
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
-                yield setup_mlflow_tracking_server_no_env_var_mock
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
+        yield setup_mlflow_tracking_server_no_env_var_mock
diff --git a/tests/integration/agents/mlflow_fixtures.py b/tests/integration/agents/mlflow_fixtures.py
index 80c49ab8..1a423a6e 100644
--- a/tests/integration/agents/mlflow_fixtures.py
+++ b/tests/integration/agents/mlflow_fixtures.py
@@ -2,78 +2,83 @@
 import pytest
 import os
 from unittest.mock import patch
-from typing import Callable, Optional
+from typing import Optional
 
 from domino.agents._client import client
 from domino.agents.tracing._util import build_agent_experiment_name
 from .conftest import TEST_AGENTS_ENV_VARS
 from .test_util import reset_prod_tracing
 
+
 def fixture_create_traces():
-        pytest.importorskip("mlflow")
-        import mlflow
-        @mlflow.trace(name="test_add")
-        def test_add(x, y):
-                return x + y
+    pytest.importorskip("mlflow")
+    import mlflow
+
+    @mlflow.trace(name="test_add")
+    def test_add(x, y):
+        return x + y
+
+    # create traces
+    with mlflow.start_run():
+        test_add(1, 2)
 
-        # create traces
-        with mlflow.start_run():
-                test_add(1, 2)
 
 def add_prod_tags(traces: Optional[list], agent_id: str, agent_version: str):
-        # adds prod tags to traces, simulating what domino services would do in a prod deployment
+    # adds prod tags to traces, simulating what domino services would do in a prod deployment
 
-        pytest.importorskip("mlflow")
-        import mlflow
-        if not traces:
-                exp_name = build_agent_experiment_name(agent_id)
-                exp = mlflow.get_experiment_by_name(exp_name)
+    pytest.importorskip("mlflow")
+    import mlflow
+    if not traces:
+        exp_name = build_agent_experiment_name(agent_id)
+        exp = mlflow.get_experiment_by_name(exp_name)
 
-                traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+
+    for t in traces:
+        client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_id", agent_id)
+        client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_version_id", agent_version)
 
-        for t in traces:
-                client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_id", agent_id)
-                client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_version_id", agent_version)
 
 def create_span_at_time(name: str, inputs: int, hours_ago: int, experiment_id: str):
-        pytest.importorskip("mlflow")
-        import mlflow
+    pytest.importorskip("mlflow")
+    import mlflow
+
+    dt = datetime.now() - timedelta(hours=hours_ago)
+    ns = int(dt.timestamp() * 1e9)
+    span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=experiment_id, start_time_ns=ns)
+    span.end()
 
-        dt = datetime.now() - timedelta(hours=hours_ago)
-        ns = int(dt.timestamp() * 1e9)
-        span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=experiment_id, start_time_ns=ns)
-        span.end()
 
 def fixture_create_prod_traces(
         agent_id: str,
         agent_version: str,
         trace_name: str,
         tracing,
-        hours_ago: Optional[int] = None, # also used as input value for span
+        hours_ago: Optional[int] = None,  # also used as input value for span
 ):
-        """Creates prod agent traces with a specific trace name"""
-        pytest.importorskip("mlflow")
-        import mlflow
+    """Creates prod agent traces with a specific trace name"""
+    pytest.importorskip("mlflow")
+    import mlflow
 
-        reset_prod_tracing()
+    reset_prod_tracing()
 
-        @tracing.add_tracing(name=trace_name)
-        def one(x):
-                return x
+    @tracing.add_tracing(name=trace_name)
+    def one(x):
+        return x
 
-        env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": agent_id }
-        with patch.dict(os.environ, env_vars, clear=True):
-                tracing.init_tracing()
-                if hours_ago is not None:
-                        experiment = mlflow.get_experiment_by_name(build_agent_experiment_name(agent_id))
-                        create_span_at_time(trace_name, hours_ago, hours_ago, experiment.experiment_id)
-                else:
-                        one(1)
+    env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": agent_id}
+    with patch.dict(os.environ, env_vars, clear=True):
+        tracing.init_tracing()
+        if hours_ago is not None:
+            experiment = mlflow.get_experiment_by_name(build_agent_experiment_name(agent_id))
+            create_span_at_time(trace_name, hours_ago, hours_ago, experiment.experiment_id)
+        else:
+            one(1)
 
-                exp_name = build_agent_experiment_name(agent_id)
-                exp = mlflow.get_experiment_by_name(exp_name)
+        exp_name = build_agent_experiment_name(agent_id)
+        exp = mlflow.get_experiment_by_name(exp_name)
 
-                ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string=f"trace.name = '{trace_name}'", return_type='list')
+        ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string=f"trace.name = '{trace_name}'", return_type='list')
 
-                # add prod tags (would be done by Domino deployment)
-                add_prod_tags(ts, agent_id, agent_version)
+        # add prod tags (would be done by Domino deployment)
+        add_prod_tags(ts, agent_id, agent_version)
diff --git a/tests/integration/agents/test_domino_run.py b/tests/integration/agents/test_domino_run.py
index 164b73e6..40790097 100644
--- a/tests/integration/agents/test_domino_run.py
+++ b/tests/integration/agents/test_domino_run.py
@@ -1,344 +1,355 @@
 import pytest
 import threading
-from unittest.mock import call
 
 from domino.agents._constants import AGENT_RUN_TAG
 
+
 def test_domino_run_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        """
-        DominoRun will contain the agent configuration loggged as parameters and the summary metrics for its
-                evaluation traces which are attached to the run only,
-                and a logged model with the agent configuration,
-                and a default summarization metrics are computed for the evaluation traces
-
-                the sklearn autolog function will be called
-        """
-        # must import logging from the module instead of the package
-        # so that mocker works
-        create_external_model_spy = mocker.spy(mlflow, "create_external_model")
-        exp = mlflow.set_experiment("test_domino_run")
-
-        @tracing.add_tracing(name="add_numbers", autolog_frameworks=['sklearn'], evaluator=lambda span: { 'add_numbers': span.outputs })
-        def add_numbers(x, y):
-                return x + y
-
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                add_numbers(1, 1)
-                add_numbers(2, 2)
+    """
+    DominoRun will contain the agent configuration logged as parameters and the summary metrics for its
+            evaluation traces which are attached to the run only,
+            and a logged model with the agent configuration,
+            and a default summarization metrics are computed for the evaluation traces
+
+            the sklearn autolog function will be called
+    """
+    # must import logging from the module instead of the package
+    # so that mocker works
+    create_external_model_spy = mocker.spy(mlflow, "create_external_model")
+    exp = mlflow.set_experiment("test_domino_run")
+
+    @tracing.add_tracing(name="add_numbers", autolog_frameworks=['sklearn'], evaluator=lambda span: {'add_numbers': span.outputs})
+    def add_numbers(x, y):
+        return x + y
+
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        add_numbers(1, 1)
+        add_numbers(2, 2)
 
-        # verify logged model created only once
-        assert create_external_model_spy.call_count == 1, "create external model should be called once"
+    # verify logged model created only once
+    assert create_external_model_spy.call_count == 1, "create external model should be called once"
 
-        models = mlflow.search_logged_models(experiment_ids=[exp.experiment_id], output_format='list')
+    models = mlflow.search_logged_models(experiment_ids=[exp.experiment_id], output_format='list')
 
-        assert len(models) == 1
+    assert len(models) == 1
 
-        model = models[0]
-        # verify agent config added as configuration
-        assert model.params['chat_assistant.max_tokens'] == '1500'
-        assert model.params['chat_assistant.model'] == 'gpt-3.5-turbo'
-        assert model.params['chat_assistant.temperature'] == '0.7'
-        assert model.params['version'] == '1.0'
+    model = models[0]
+    # verify agent config added as configuration
+    assert model.params['chat_assistant.max_tokens'] == '1500'
+    assert model.params['chat_assistant.model'] == 'gpt-3.5-turbo'
+    assert model.params['chat_assistant.temperature'] == '0.7'
+    assert model.params['version'] == '1.0'
 
-        # verify evaluation traces not logged to model
-        ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], model_id=model.model_id, return_type='list')
-        assert len(ts) == 0, "traces should not be logged to model"
+    # verify evaluation traces not logged to model
+    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], model_id=model.model_id, return_type='list')
+    assert len(ts) == 0, "traces should not be logged to model"
 
-        run = mlflow.get_run(run_id)
+    run = mlflow.get_run(run_id)
 
-        # verify run has agent config logged to it as parameters
-        assert run.data.params['chat_assistant.max_tokens'] == '1500'
-        assert run.data.params['chat_assistant.model'] == 'gpt-3.5-turbo'
-        assert run.data.params['chat_assistant.temperature'] == '0.7'
-        assert run.data.params['version'] == '1.0'
+    # verify run has agent config logged to it as parameters
+    assert run.data.params['chat_assistant.max_tokens'] == '1500'
+    assert run.data.params['chat_assistant.model'] == 'gpt-3.5-turbo'
+    assert run.data.params['chat_assistant.temperature'] == '0.7'
+    assert run.data.params['version'] == '1.0'
 
-        assert run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
+    assert run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
+
+    # verify run has summary metrics logged to it
+    # average of outputs is (2 + 4) / 2 = 3
+    assert run.data.metrics['mean_add_numbers'] == 3, "average of add_numbers should be 3"
 
-        # verify run has summary metrics logged to it
-        # average of outputs is 2 + 4/2 = 3
-        assert run.data.metrics['mean_add_numbers'] == 3, "average of add_numbers should be 3"
 
 def test_domino_run_dev_custom_aggregator(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        DominoRun will contain custom summarizaiton metrics for eval traces
-        """
-        exp = mlflow.set_experiment("test_domino_run_custom_aggregator")
-
-        @tracing.add_tracing(name="median", evaluator=lambda span: { 'median': span.outputs })
-        def for_median(x):
-                return x
-
-        @tracing.add_tracing(name="mean", evaluator=lambda span: { 'mean': span.outputs })
-        def for_mean(x):
-                return x
-
-        @tracing.add_tracing(name="stdev", evaluator=lambda span: { 'stdev': span.outputs })
-        def for_stdev(x):
-                return x
-
-        @tracing.add_tracing(name="min", evaluator=lambda span: { 'min': span.outputs })
-        def for_min(x):
-                return x
-
-        @tracing.add_tracing(name="max", evaluator=lambda span: { 'max': span.outputs })
-        def for_max(x):
-                return x
-
-        summarization_metrics = [
-                ('median', 'median'),
-                ('mean', 'mean'),
-                ('stdev', 'stdev'),
-                ('min', 'min'),
-                ('max', 'max')
-        ]
-        run_id = None
-        with logging.DominoRun(custom_summary_metrics=summarization_metrics) as run:
-                run_id = run.info.run_id
-                for i in range(1, 6):
-                        for_median(i)
-                        for_mean(i)
-                        for_stdev(i)
-                        for_min(i)
-                        for_max(i)
-
-        run = mlflow.get_run(run_id)
-
-        # verify run has summary metrics logged to it
-        # mean of outputs is 2 + 4/2 = 3
-        # median is 2, 2, 4 = 2
-        assert run.data.metrics['median_median'] == 3
-        assert run.data.metrics['mean_mean'] == 3
-        assert run.data.metrics['stdev_stdev'] == 1.581
-        assert run.data.metrics['min_min'] == 1
-        assert run.data.metrics['max_max'] == 5
+    """
+    DominoRun will contain custom summarization metrics for eval traces
+    """
+    exp = mlflow.set_experiment("test_domino_run_custom_aggregator")
+
+    @tracing.add_tracing(name="median", evaluator=lambda span: {'median': span.outputs})
+    def for_median(x):
+        return x
+
+    @tracing.add_tracing(name="mean", evaluator=lambda span: {'mean': span.outputs})
+    def for_mean(x):
+        return x
+
+    @tracing.add_tracing(name="stdev", evaluator=lambda span: {'stdev': span.outputs})
+    def for_stdev(x):
+        return x
+
+    @tracing.add_tracing(name="min", evaluator=lambda span: {'min': span.outputs})
+    def for_min(x):
+        return x
+
+    @tracing.add_tracing(name="max", evaluator=lambda span: {'max': span.outputs})
+    def for_max(x):
+        return x
+
+    summarization_metrics = [
+            ('median', 'median'),
+            ('mean', 'mean'),
+            ('stdev', 'stdev'),
+            ('min', 'min'),
+            ('max', 'max')
+    ]
+    run_id = None
+    with logging.DominoRun(custom_summary_metrics=summarization_metrics) as run:
+        run_id = run.info.run_id
+        for i in range(1, 6):
+            for_median(i)
+            for_mean(i)
+            for_stdev(i)
+            for_min(i)
+            for_max(i)
+
+    run = mlflow.get_run(run_id)
+
+    # verify run has summary metrics logged to it
+    # inputs are 1..5, so mean is 3
+    # and median of 1..5 is 3
+    assert run.data.metrics['median_median'] == 3
+    assert run.data.metrics['mean_mean'] == 3
+    assert run.data.metrics['stdev_stdev'] == 1.581
+    assert run.data.metrics['min_min'] == 1
+    assert run.data.metrics['max_max'] == 5
+
 
 def test_domino_run_dev_bad_custom_aggregator(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        DominoRun will fail if one of the aggregators is invalid
-        """
-        exp = mlflow.set_experiment("test_domino_run_dev_bad_custom_aggregator")
+    """
+    DominoRun will fail if one of the aggregators is invalid
+    """
+    exp = mlflow.set_experiment("test_domino_run_dev_bad_custom_aggregator")
 
-        summarization_metrics = [('max', 'sdf')]
+    summarization_metrics = [('max', 'sdf')]
+
+    with pytest.raises(ValueError):
+        logging.DominoRun(custom_summary_metrics=summarization_metrics)
 
-        with pytest.raises(ValueError):
-                logging.DominoRun(custom_summary_metrics=summarization_metrics)
 
 def test_domino_run_configure_experiment_name(setup_mlflow_tracking_server, mlflow, logging, tracing):
-        """
-        if an experiment name is provided, the DominoRun will create a run in that experiment
-        and log traces to it
-        """
-        mlflow.create_experiment("test_domino_run_configure_experiment_name")
-        exp_id = mlflow.create_experiment("test_domino_run_configure_experiment_name_other")
+    """
+    if an experiment name is provided, the DominoRun will create a run in that experiment
+    and log traces to it
+    """
+    mlflow.create_experiment("test_domino_run_configure_experiment_name")
+    exp_id = mlflow.create_experiment("test_domino_run_configure_experiment_name_other")
+
+    @tracing.add_tracing(name="unit")
+    def unit(x):
+        return x
 
-        @tracing.add_tracing(name="unit")
-        def unit(x):
-                return x
+    run_id = None
+    with logging.DominoRun("test_domino_run_configure_experiment_name_other") as run:
+        run_id = run.info.run_id
+        unit(1)
 
-        run_id = None
-        with logging.DominoRun("test_domino_run_configure_experiment_name_other") as run:
-                run_id = run.info.run_id
-                unit(1)
+    run = mlflow.get_run(run_id)
 
-        run = mlflow.get_run(run_id)
+    traces = mlflow.search_traces(experiment_ids=[exp_id], filter_string="trace.name = 'unit'")
 
-        traces = mlflow.search_traces(experiment_ids=[exp_id], filter_string=f"trace.name = 'unit'")
+    assert run.info.experiment_id == exp_id, "run should belong to test_domino_run_configure_experiment_name_other"
+    assert len(traces) == 1
 
-        assert run.info.experiment_id == exp_id, "run should belong to test_domino_run_configure_experiment_name_other"
-        assert len(traces) == 1
 
 def test_domino_run_extend_current_run(setup_mlflow_tracking_server, mlflow, logging, tracing):
-        """
-        if a run_id is provided, then the DominoRun with add traces to that run
-        """
-        mlflow.set_experiment("test_domino_run_extend_current_run")
+    """
+    if a run_id is provided, then the DominoRun will add traces to that run
+    """
+    mlflow.set_experiment("test_domino_run_extend_current_run")
 
-        @tracing.add_tracing(name="unit", evaluator=lambda span: { 'unit': span.outputs })
-        def unit(x):
-                return x
+    @tracing.add_tracing(name="unit", evaluator=lambda span: {'unit': span.outputs})
+    def unit(x):
+        return x
 
-        first_run_id = None
-        second_run_id = None
+    first_run_id = None
+    second_run_id = None
 
-        with logging.DominoRun() as run:
-                first_run_id = run.info.run_id
-                unit(1)
+    with logging.DominoRun() as run:
+        first_run_id = run.info.run_id
+        unit(1)
+
+    with logging.DominoRun(run_id=first_run_id) as run:
+        second_run_id = run.info.run_id
+        unit(2)
 
-        with logging.DominoRun(run_id=first_run_id) as run:
-                second_run_id = run.info.run_id
-                unit(2)
+    traces = mlflow.search_traces(experiment_ids=[run.info.experiment_id], filter_string=f"metadata.mlflow.sourceRun = '{first_run_id}'", return_type='list')
 
-        traces = mlflow.search_traces(experiment_ids=[run.info.experiment_id], filter_string=f"metadata.mlflow.sourceRun = '{first_run_id}'", return_type='list')
+    assert first_run_id == second_run_id, "Both runs should have the same run_id"
+    assert len(traces) == 2, "There should be two traces for unit"
 
-        assert first_run_id == second_run_id, "Both runs should have the same run_id"
-        assert len(traces) == 2, "There should be two traces for unit"
+    resumed_run = mlflow.get_run(first_run_id)
+    assert resumed_run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
 
-        resumed_run = mlflow.get_run(first_run_id)
-        assert resumed_run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
+    # each domino run should have an external model linked to it
+    models = mlflow.search_logged_models(experiment_ids=[run.info.experiment_id], output_format='list')
+    assert [m.source_run_id for m in models] == [first_run_id, first_run_id]
 
-        # each domino run should have an external model linked to it
-        models = mlflow.search_logged_models(experiment_ids=[run.info.experiment_id], output_format='list')
-        assert [m.source_run_id for m in models] == [first_run_id, first_run_id]
 
 def test_domino_run_should_not_swallow_exceptions(setup_mlflow_tracking_server, mlflow, logging):
-        """
-        If the user's code raises an exception, the DominoRun should allow user code to catch it
-        """
-        mlflow.set_experiment("test_domino_run_should_not_swallow_exceptions")
+    """
+    If the user's code raises an exception, the DominoRun should allow user code to catch it
+    """
+    mlflow.set_experiment("test_domino_run_should_not_swallow_exceptions")
+
+    with pytest.raises(ZeroDivisionError):
+        with logging.DominoRun() as run:
+            1/0
 
-        with pytest.raises(ZeroDivisionError):
-            with logging.DominoRun() as run:
-                1/0
 
 def test_domino_run_parallelized_logic(setup_mlflow_tracking_server, mlflow, logging, tracing):
-        """
-        Logic run in threads should execute normally
-        """
-        mlflow.set_experiment("test_domino_run_parallelized_logic")
+    """
+    Logic run in threads should execute normally
+    """
+    mlflow.set_experiment("test_domino_run_parallelized_logic")
+
+    @tracing.add_tracing(name="a")
+    def a(num):
+        return num
 
-        @tracing.add_tracing(name="a")
-        def a(num):
-                return num
+    @tracing.add_tracing(name="b")
+    def b(num):
+        return num
 
-        @tracing.add_tracing(name="b")
-        def b(num):
-                return num
+    with logging.DominoRun():
+        t1 = threading.Thread(target=a, args=(10,))
+        t2 = threading.Thread(target=b, args=(10,))
 
-        with logging.DominoRun():
-                t1 = threading.Thread(target=a, args=(10,))
-                t2 = threading.Thread(target=b, args=(10,))
+        t1.start()
+        t2.start()
 
-                t1.start()
-                t2.start()
+        t1.join()
+        t2.join()
 
-                t1.join()
-                t2.join()
+    traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
+    traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
 
-        traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
-        traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
-        def get_run_id(trace):
-                return trace.info.trace_metadata.get('mlflow.sourceRun')
+    def get_run_id(trace):
+        return trace.info.trace_metadata.get('mlflow.sourceRun')
+
+    assert len(traces_a) == 1, "There should be one trace for a"
+    assert len(traces_b) == 1, "There should be one trace for b"
+    assert get_run_id(traces_a[0]) == get_run_id(traces_b[0]), "The a and b traces should belong to the same run"
 
-        assert len(traces_a) == 1, "There should be one trace for a"
-        assert len(traces_b) == 1, "There should be one trace for b"
-        assert get_run_id(traces_a[0]) == get_run_id(traces_b[0]), "The a and b traces should belong to the same run"
 
 def test_domino_run_extend_concluded_run_manual_evals_mean_logged(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        When extending a concluded run, manual log_evaluation calls inside the DominoRun block
-        are summarized and the average metric is logged to the run.
-        """
-        mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_mean_logged")
-
-        @tracing.add_tracing(name="add_numbers")
-        def add_numbers(x, y):
-                return x + y
-
-        # Create and conclude a run with two traces (outputs: 2 and 4)
-        with mlflow.start_run() as parent_run:
-                concluded_run_id = parent_run.info.run_id
-                add_numbers(1, 1)
-                add_numbers(2, 2)
-
-        # Extend the concluded run and log manual evaluations; DominoRun should log mean summary (3)
-        with logging.DominoRun(run_id=concluded_run_id):
-                traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
-                for t in traces_resp.data:
-                        # use the function output as the evaluation value
-                        value = t.spans[0].outputs
-                        logging.log_evaluation(
-                                trace_id=t.id,
-                                name="helpfulness",
-                                value=value,
-                        )
-
-        run = mlflow.get_run(concluded_run_id)
-        # average of 2 + 4 = 3
-        assert run.data.metrics['mean_helpfulness'] == 3, "average of helpfulness should be 3"
+    """
+    When extending a concluded run, manual log_evaluation calls inside the DominoRun block
+    are summarized and the average metric is logged to the run.
+    """
+    mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_mean_logged")
+
+    @tracing.add_tracing(name="add_numbers")
+    def add_numbers(x, y):
+        return x + y
+
+    # Create and conclude a run with two traces (outputs: 2 and 4)
+    with mlflow.start_run() as parent_run:
+        concluded_run_id = parent_run.info.run_id
+        add_numbers(1, 1)
+        add_numbers(2, 2)
+
+    # Extend the concluded run and log manual evaluations; DominoRun should log mean summary (3)
+    with logging.DominoRun(run_id=concluded_run_id):
+        traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
+        for t in traces_resp.data:
+            # use the function output as the evaluation value
+            value = t.spans[0].outputs
+            logging.log_evaluation(
+                    trace_id=t.id,
+                    name="helpfulness",
+                    value=value,
+            )
+
+    run = mlflow.get_run(concluded_run_id)
+    # average of 2 and 4 is 3
+    assert run.data.metrics['mean_helpfulness'] == 3, "average of helpfulness should be 3"
+
 
 def test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        When extending a concluded run, with a custom aggregator, manual log_evaluation calls inside the
-        DominoRun block are summarized using the custom aggregator and logged to the run.
-        """
-        mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged")
-
-        @tracing.add_tracing(name="add_numbers")
-        def add_numbers(x, y):
-                return x + y
-
-        # Create and conclude a run with two traces (outputs: 2 and 4)
-        with mlflow.start_run() as parent_run:
-                concluded_run_id = parent_run.info.run_id
-                add_numbers(1, 1)
-                add_numbers(2, 2)
-
-        # Extend the concluded run and log manual evaluations; DominoRun should log custom summary (max -> 4)
-        custom_summary_metrics = [("helpfulness", "max")]
-        with logging.DominoRun(run_id=concluded_run_id, custom_summary_metrics=custom_summary_metrics):
-                traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
-                for t in traces_resp.data:
-                        value = t.spans[0].outputs
-                        logging.log_evaluation(
-                                trace_id=t.id,
-                                name="helpfulness",
-                                value=value,
-                        )
-
-        run = mlflow.get_run(concluded_run_id)
-        # max of 2 and 4 is 4
-        assert run.data.metrics['max_helpfulness'] == 4, "max of helpfulness should be 4"
+    """
+    When extending a concluded run, with a custom aggregator, manual log_evaluation calls inside the
+    DominoRun block are summarized using the custom aggregator and logged to the run.
+    """
+    mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged")
+
+    @tracing.add_tracing(name="add_numbers")
+    def add_numbers(x, y):
+        return x + y
+
+    # Create and conclude a run with two traces (outputs: 2 and 4)
+    with mlflow.start_run() as parent_run:
+        concluded_run_id = parent_run.info.run_id
+        add_numbers(1, 1)
+        add_numbers(2, 2)
+
+    # Extend the concluded run and log manual evaluations; DominoRun should log custom summary (max -> 4)
+    custom_summary_metrics = [("helpfulness", "max")]
+    with logging.DominoRun(run_id=concluded_run_id, custom_summary_metrics=custom_summary_metrics):
+        traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
+        for t in traces_resp.data:
+            value = t.spans[0].outputs
+            logging.log_evaluation(
+                    trace_id=t.id,
+                    name="helpfulness",
+                    value=value,
+            )
+
+    run = mlflow.get_run(concluded_run_id)
+    # max of 2 and 4 is 4
+    assert run.data.metrics['max_helpfulness'] == 4, "max of helpfulness should be 4"
+
 
 def test_domino_run_recomputes_existing_aggregations(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        When a run already has aggregated metrics (e.g., max_), a subsequent DominoRun
-        on the same run_id recomputes those aggregations in addition to defaults.
-        """
-        exp = mlflow.set_experiment("test_domino_run_recomputes_existing_aggregations")
+    """
+    When a run already has aggregated metrics (e.g., max_), a subsequent DominoRun
+    on the same run_id recomputes those aggregations in addition to defaults.
+    """
+    exp = mlflow.set_experiment("test_domino_run_recomputes_existing_aggregations")
 
-        @tracing.add_tracing(name="agg", evaluator=lambda span: { 'agg': span.outputs })
-        def agg_fn(x):
-                return x
+    @tracing.add_tracing(name="agg", evaluator=lambda span: {'agg': span.outputs})
+    def agg_fn(x):
+        return x
 
-        run_id = None
-        # First run computes both default mean and custom max aggregations
-        with logging.DominoRun(custom_summary_metrics=[('agg', 'mean'), ('agg', 'max')]) as run:
-                run_id = run.info.run_id
-                agg_fn(1)
-                agg_fn(3)
+    run_id = None
+    # First run computes both default mean and custom max aggregations
+    with logging.DominoRun(custom_summary_metrics=[('agg', 'mean'), ('agg', 'max')]) as run:
+        run_id = run.info.run_id
+        agg_fn(1)
+        agg_fn(3)
 
+    run = mlflow.get_run(run_id)
+    assert run.data.metrics['mean_agg'] == 2, 'mean should be 2'
+    assert run.data.metrics['max_agg'] == 3, 'max should be 3'
 
-        run = mlflow.get_run(run_id)
-        assert run.data.metrics['mean_agg'] == 2, 'mean should be 2'
-        assert run.data.metrics['max_agg'] == 3, 'max should be 3'
+    # Second run continues the same run and adds a new value; expects recomputed max (and mean)
+    with logging.DominoRun(run_id=run_id) as run2:
+        agg_fn(5)
 
-        # Second run continues the same run and adds a new value; expects recomputed max (and mean)
-        with logging.DominoRun(run_id=run_id) as run2:
-                agg_fn(5)
+    run = mlflow.get_run(run_id)
+    assert run.data.metrics['max_agg'] == 5, 'max should be 5'
+    assert run.data.metrics['mean_agg'] == 3, 'mean should be 3'
 
-        run = mlflow.get_run(run_id)
-        assert run.data.metrics['max_agg'] == 5, 'max should be 5'
-        assert run.data.metrics['mean_agg'] == 3, 'mean should be 3'
 
 def test_domino_agent_context_tags_run(setup_mlflow_tracking_server, mlflow, logging):
-        mlflow.set_experiment("test_domino_agent_context_tags_run")
+    mlflow.set_experiment("test_domino_agent_context_tags_run")
+
+    with logging.DominoAgentContext() as run:
+        run_id = run.info.run_id
 
-        with logging.DominoAgentContext() as run:
-                run_id = run.info.run_id
+    run = mlflow.get_run(run_id)
+    assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the run"
 
-        run = mlflow.get_run(run_id)
-        assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the run"
 
 def test_domino_agent_context_tags_resumed_run(setup_mlflow_tracking_server, mlflow, logging):
-        mlflow.set_experiment("test_domino_agent_context_tags_resumed_run")
+    mlflow.set_experiment("test_domino_agent_context_tags_resumed_run")
 
-        with logging.DominoAgentContext() as run:
-                first_run_id = run.info.run_id
+    with logging.DominoAgentContext() as run:
+        first_run_id = run.info.run_id
 
-        with logging.DominoAgentContext(run_id=first_run_id) as run:
-                pass
+    with logging.DominoAgentContext(run_id=first_run_id) as run:
+        pass
 
-        run = mlflow.get_run(first_run_id)
-        assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the resumed run"
+    run = mlflow.get_run(first_run_id)
+    assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the resumed run"
diff --git a/tests/integration/agents/test_logging.py b/tests/integration/agents/test_logging.py
index 15376912..53b23556 100644
--- a/tests/integration/agents/test_logging.py
+++ b/tests/integration/agents/test_logging.py
@@ -3,67 +3,70 @@
 from domino.agents._eval_tags import InvalidEvaluationLabelException
 from .mlflow_fixtures import fixture_create_traces
 
+
 def test_log_evaluation_dev(setup_mlflow_tracking_server, mlflow, logging):
-	# create experiment
-	exp = mlflow.set_experiment("test_log_evaluation")
-
-	fixture_create_traces()
-
-	# log evaluations to traces
-	traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
-	for trace in traces:
-		logging.log_evaluation(
-			trace.info.trace_id,
-			value=1,
-			name="helpfulness",
-		)
-		logging.log_evaluation(
-			trace.info.trace_id,
-			value="dogs",
-			name="category",
-		)
-
-	# verify tags on traces
-	tagged_traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type="list")
-	tags = tagged_traces[0].info.tags
-
-	assert tags['domino.prog.label.category'] == 'dogs'
-	assert tags['domino.prog.metric.helpfulness'] == '1'
-	assert tags['domino.internal.is_eval'] == 'true'
+    # create experiment
+    exp = mlflow.set_experiment("test_log_evaluation")
+
+    fixture_create_traces()
+
+    # log evaluations to traces
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    for trace in traces:
+        logging.log_evaluation(
+                trace.info.trace_id,
+                value=1,
+                name="helpfulness",
+        )
+        logging.log_evaluation(
+                trace.info.trace_id,
+                value="dogs",
+                name="category",
+        )
+
+    # verify tags on traces
+    tagged_traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type="list")
+    tags = tagged_traces[0].info.tags
+
+    assert tags['domino.prog.label.category'] == 'dogs'
+    assert tags['domino.prog.metric.helpfulness'] == '1'
+    assert tags['domino.internal.is_eval'] == 'true'
+
 
 def test_log_evaluation_invalid_name(setup_mlflow_tracking_server, mlflow, logging):
-	# create experiment
-	exp = mlflow.set_experiment("test_log_evaluation_invalid_name")
+    # create experiment
+    exp = mlflow.set_experiment("test_log_evaluation_invalid_name")
+
+    fixture_create_traces()
 
-	fixture_create_traces()
+    # log evaluations to traces
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    trace = traces[0]
 
-	# log evaluations to traces
-	traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
-	trace = traces[0]
+    with pytest.raises(InvalidEvaluationLabelException):
+        logging.log_evaluation(
+                trace.info.trace_id,
+                value=1,
+                name="*",
+        )
 
-	with pytest.raises(InvalidEvaluationLabelException):
-		logging.log_evaluation(
-			trace.info.trace_id,
-			value=1,
-			name="*",
-		)
 
 def test_log_evaluation_non_string_float(setup_mlflow_tracking_server, mlflow, logging):
-	"""
-	Log evaluation should not allow logging objects
-	"""
-	# create experiment
-	exp = mlflow.set_experiment("test_log_evaluation_non_string_float")
-
-	fixture_create_traces()
-
-	# log evaluations to traces
-	traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
-	trace = traces[0]
-
-	with pytest.raises(TypeError):
-		logging.log_evaluation(
-			trace.info.trace_id,
-			value={},
-			name="myobject",
-		)
+    """
+    Log evaluation should not allow logging objects
+    """
+    # create experiment
+    exp = mlflow.set_experiment("test_log_evaluation_non_string_float")
+
+    fixture_create_traces()
+
+    # log evaluations to traces
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    trace = traces[0]
+
+    with pytest.raises(TypeError):
+        logging.log_evaluation(
+                trace.info.trace_id,
+                value={},
+                name="myobject",
+        )
diff --git a/tests/integration/agents/test_tracing.py b/tests/integration/agents/test_tracing.py
index 70e05794..20fb80d4 100644
--- a/tests/integration/agents/test_tracing.py
+++ b/tests/integration/agents/test_tracing.py
@@ -3,10 +3,6 @@
 import inspect
 import logging as logger
 import os
-import random
-import subprocess
-from openai import OpenAI
-import openai
 import pytest
 import threading
 import time
@@ -14,987 +10,1016 @@
 
 from ...conftest import TEST_AGENTS_ENV_VARS
 from domino.agents._constants import EXPERIMENT_AGENT_TAG
-from .mlflow_fixtures import fixture_create_prod_traces, create_span_at_time, add_prod_tags
+from .mlflow_fixtures import fixture_create_prod_traces, add_prod_tags
 from .test_util import reset_prod_tracing
-from domino.agents._client import client
 from domino.agents.tracing._util import build_agent_experiment_name
 # NOTE: don't use this import to test public functions, use the tracing pytest fixture instead
 from domino.agents.tracing.tracing import _search_traces
 from domino.agents._eval_tags import InvalidEvaluationLabelException
 
+
 def test_init_tracing_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing):
-        """
-        should initialize autologging only once for each framework
-        should create an experiment for the agent and tag it only once
-        """
-        app_id = "appid"
-        test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
-        expected_experiment_name = build_agent_experiment_name(app_id)
-        env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
+    """
+    should initialize autologging only once for each framework
+    should create an experiment for the agent and tag it only once
+    """
+    app_id = "appid"
+    test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    expected_experiment_name = build_agent_experiment_name(app_id)
+    env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
+
+    import domino.agents.tracing.tracing
+    import domino.agents._client
+    import mlflow
+    autolog_spy = mocker.spy(domino.agents.tracing.inittracing, "call_autolog")
+    set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
+    set_experiment_spy = mocker.spy(mlflow, "set_experiment")
+
+    reset_prod_tracing()
+
+    with patch.dict(os.environ, env_vars, clear=True):
+        tracing.init_tracing(["sklearn"])
+        tracing.init_tracing(["sklearn"])
+        found_exp = mlflow.get_experiment_by_name(expected_experiment_name)
+
+        assert autolog_spy.call_args_list == [call('sklearn')]
+        assert set_experiment_tag_spy.call_count == 1, "should only save tag on experiment once"
+        assert set_experiment_spy.call_count != 0, "should set an active experiment"
+        assert found_exp is not None, "agent experiment should exist"
+        assert found_exp.tags.get(EXPERIMENT_AGENT_TAG) == "true", "agent experiment should be tagged"
 
-        import domino.agents.tracing.tracing
-        import domino.agents._client
-        import mlflow
-        autolog_spy = mocker.spy(domino.agents.tracing.inittracing, "call_autolog")
-        set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
-        set_experiment_spy = mocker.spy(mlflow, "set_experiment")
 
-        reset_prod_tracing()
+def test_init_tracing_logs_experiment_creation_debug(setup_mlflow_tracking_server, mlflow, tracing, caplog):
+    """
+    when log level is debug, verify the experiment creation log includes the experiment ID
+    """
+    app_id = "app_id_logs_debug"
+    test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
 
-        with patch.dict(os.environ, env_vars, clear=True):
-                tracing.init_tracing(["sklearn"])
-                tracing.init_tracing(["sklearn"])
-                found_exp = mlflow.get_experiment_by_name(expected_experiment_name)
+    reset_prod_tracing()
 
-                assert autolog_spy.call_args_list == [call('sklearn')]
-                assert set_experiment_tag_spy.call_count == 1, "should only save tag on experiment once"
-                assert set_experiment_spy.call_count is not 0, "should set an active experiment"
-                assert found_exp is not None, "agent experiment should exist"
-                assert found_exp.tags.get(EXPERIMENT_AGENT_TAG) == "true", "agent experiment should be tagged"
+    with patch.dict(os.environ, env_vars, clear=True), caplog.at_level(logger.DEBUG):
+        tracing.init_tracing()
+        expected_experiment_name = build_agent_experiment_name(app_id)
+        exp = mlflow.get_experiment_by_name(expected_experiment_name)
+        assert exp is not None, "experiment should be created in prod mode"
+        assert f"Created experiment for Agent with ID {exp.experiment_id}" in caplog.text
 
-def test_init_tracing_logs_experiment_creation_debug(setup_mlflow_tracking_server, mlflow, tracing, caplog):
-        """
-        when log level is debug, verify the experiment creation log includes the experiment ID
-        """
-        app_id = "app_id_logs_debug"
-        test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
-        env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
-
-        reset_prod_tracing()
-
-        with patch.dict(os.environ, env_vars, clear=True), caplog.at_level(logger.DEBUG):
-                tracing.init_tracing()
-                expected_experiment_name = build_agent_experiment_name(app_id)
-                exp = mlflow.get_experiment_by_name(expected_experiment_name)
-                assert exp is not None, "experiment should be created in prod mode"
-                assert f"Created experiment for Agent with ID {exp.experiment_id}" in caplog.text
 
 def test_logging_traces_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing):
-        """
-        traces created in separate threads forked from the same main thread
-        should be saved to the same agent experiment
-        """
-        app_id = "threaded_app_id"
-        test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
-        expected_experiment_name = build_agent_experiment_name(app_id)
-        env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
+    """
+    traces created in separate threads forked from the same main thread
+    should be saved to the same agent experiment
+    """
+    app_id = "threaded_app_id"
+    test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    expected_experiment_name = build_agent_experiment_name(app_id)
+    env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
 
-        reset_prod_tracing()
+    reset_prod_tracing()
 
-        with patch.dict(os.environ, env_vars, clear=True):
-                tracing.init_tracing()
+    with patch.dict(os.environ, env_vars, clear=True):
+        tracing.init_tracing()
 
-                @tracing.add_tracing(name="a")
-                def a(num):
-                        return num
+        @tracing.add_tracing(name="a")
+        def a(num):
+            return num
 
-                @tracing.add_tracing(name="b")
-                def b(num):
-                        return num
+        @tracing.add_tracing(name="b")
+        def b(num):
+            return num
 
-                t1 = threading.Thread(target=a, args=(10,))
-                t2 = threading.Thread(target=b, args=(10,))
+        t1 = threading.Thread(target=a, args=(10,))
+        t2 = threading.Thread(target=b, args=(10,))
 
-                t1.start()
-                t2.start()
+        t1.start()
+        t2.start()
 
-                t1.join()
-                t2.join()
+        t1.join()
+        t2.join()
 
-        # a and b traces should all be in the agent experiment
-        traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
-        traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
+    # a and b traces should all be in the agent experiment
+    traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
+    traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
 
-        def get_experiment_id(trace):
-                return trace.info.trace_location.mlflow_experiment.experiment_id
+    def get_experiment_id(trace):
+        return trace.info.trace_location.mlflow_experiment.experiment_id
+
+    found_exp_ids = set([get_experiment_id(t) for t in traces_a + traces_b])
+    actual_exp_id = set([mlflow.get_experiment_by_name(expected_experiment_name).experiment_id])
+    assert found_exp_ids == actual_exp_id, "traces should be linked to the agent experiment"
 
-        found_exp_ids = set([get_experiment_id(t) for t in traces_a + traces_b])
-        actual_exp_id = set([mlflow.get_experiment_by_name(expected_experiment_name).experiment_id])
-        assert found_exp_ids == actual_exp_id, "traces should be linked to the agent experiment"
 
 def test_inline_evaluators_should_not_run_prod(setup_mlflow_tracking_server, tracing):
-        """
-        in prod mode, inline evaluators should not run
-        """
-        app_id = "inline_evals_prod"
-        test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
-        env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
+    """
+    in prod mode, inline evaluators should not run
+    """
+    app_id = "inline_evals_prod"
+    test_case_vars = {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
+
+    reset_prod_tracing()
 
-        reset_prod_tracing()
+    @tracing.add_tracing(name="span_unit", evaluator=lambda span: {'span_result': 1})
+    def span_unit(x):
+        return x
 
-        @tracing.add_tracing(name="span_unit", evaluator=lambda span: { 'span_result': 1 })
-        def span_unit(x):
-                return x
+    @tracing.add_tracing(name="trace_unit", trace_evaluator=lambda trace: {'trace_result': 1})
+    def trace_unit(x):
+        return x
 
-        @tracing.add_tracing(name="trace_unit", trace_evaluator=lambda trace: { 'trace_result': 1 })
-        def trace_unit(x):
-                return x
+    @tracing.add_tracing(name="trace_and_unit", evaluator=lambda span: {'both_span_result': 1}, trace_evaluator=lambda trace: {'both_trace_result': 1})
+    def trace_and_unit(x):
+        return x
 
-        @tracing.add_tracing(name="trace_and_unit", evaluator=lambda span: { 'both_span_result': 1 }, trace_evaluator=lambda trace: { 'both_trace_result': 1 })
-        def trace_and_unit(x):
-                return x
+    with patch.dict(os.environ, env_vars, clear=True):
+        tracing.init_tracing()
+        span_unit(1)
+        trace_unit(1)
+        trace_and_unit(1)
 
-        with patch.dict(os.environ, env_vars, clear=True):
-                tracing.init_tracing()
-                span_unit(1)
-                trace_unit(1)
-                trace_and_unit(1)
+    # add prod tags, like what domino services would do
+    add_prod_tags(None, app_id, "v1")
 
-        # add prod tags, like what domino services would do
-        add_prod_tags(None, app_id, "v1")
+    ts = tracing.search_agent_traces(agent_id=app_id)
+    eval_results = [r for trace in ts.data for r in trace.evaluation_results]
 
-        ts = tracing.search_agent_traces(agent_id=app_id)
-        eval_results = [r for trace in ts.data for r in trace.evaluation_results]
+    assert len(ts.data) == 3, "three traces should be created"
+    assert len(eval_results) == 0, "no evaluation results should be logged in prod mode"
 
-        assert len(ts.data) == 3, "three traces should be created"
-        assert len(eval_results) == 0, "no evaluation results should be logged in prod mode"
 
 def test_init_tracing_dev_mode(setup_mlflow_tracking_server, mocker, mlflow, tracing):
-        """
-        should not create an experiment or set tags
-        """
-        import domino.agents._client
-        import mlflow
-        set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
-        set_experiment_spy = mocker.spy(mlflow, "set_experiment")
+    """
+    should not create an experiment or set tags
+    """
+    import domino.agents._client
+    import mlflow
+    set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
+    set_experiment_spy = mocker.spy(mlflow, "set_experiment")
+
+    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
+        tracing.init_tracing(["sklearn"])
 
-        with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
-                tracing.init_tracing(["sklearn"])
+        assert set_experiment_tag_spy.call_count == 0, "should not set experiment tag"
+        assert set_experiment_spy.call_count == 0, "should not set an active experiment"
 
-                assert set_experiment_tag_spy.call_count == 0, "should set experiment tag"
-                assert set_experiment_spy.call_count == 0, "should not set an active experiment"
 
 def test_add_tracing_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        """
-        add_tracing will create a new trace with a given name
-        and attach evaluation tags to the trace
-        """
-        # must import logging from the module instead of the package
-        # so that mocker works
-        exp = mlflow.set_experiment("test_add_tracing_dev")
+    """
+    add_tracing will create a new trace with a given name
+    and attach evaluation tags to the trace
+    """
+    # must import logging from the module instead of the package
+    # so that mocker works
+    exp = mlflow.set_experiment("test_add_tracing_dev")
 
-        @tracing.add_tracing(name="add_numbers", autolog_frameworks=["sklearn"], evaluator=lambda span: { 'result': span.outputs })
-        def add_numbers(x, y):
-                return x + y
+    @tracing.add_tracing(name="add_numbers", autolog_frameworks=["sklearn"], evaluator=lambda span: {'result': span.outputs})
+    def add_numbers(x, y):
+        return x + y
 
-        with logging.DominoRun("test_add_tracing_dev"):
-                add_numbers(1, 1)
+    with logging.DominoRun("test_add_tracing_dev"):
+        add_numbers(1, 1)
 
-        ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
-        assert len(ts) == 1, "only one trace should be created"
+    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    assert len(ts) == 1, "only one trace should be created"
+
+    # assert tags
+    tags = ts[0].info.tags
+    assert tags['domino.prog.metric.result'] == '2'
+    assert tags['domino.internal.is_eval'] == 'true'
 
-        # assert tags
-        tags = ts[0].info.tags
-        assert tags['domino.prog.metric.result'] == '2'
-        assert tags['domino.internal.is_eval'] == 'true'
 
 def test_add_tracing_dev_use_trace_in_evaluator(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging, caplog):
-        """
-        User can access a trace in an inline evaluator on the trace parent, and not the child
-        """
-        exp = mlflow.set_experiment("test_add_tracing_dev_use_trace_in_evaluator")
+    """
+    User can access a trace in an inline evaluator on the trace parent, and not the child
+    """
+    exp = mlflow.set_experiment("test_add_tracing_dev_use_trace_in_evaluator")
 
-        @tracing.add_tracing(name="parent", evaluator=lambda span: { 'span_exists_parent': 'True' }, trace_evaluator=lambda trace: { 'trace_exists_parent': 'True' })
-        def parent(x):
-                return unit(x)
+    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    def parent(x):
+        return unit(x)
 
-        def child_trace_evaluator(trace):
-                return { 'trace_exists_child': 'True' }
+    def child_trace_evaluator(trace):
+        return {'trace_exists_child': 'True'}
 
-        @tracing.add_tracing(name="unit", evaluator=lambda span: { 'span_exists_child': 'True' }, trace_evaluator=child_trace_evaluator)
-        def unit(x):
-                return x
+    @tracing.add_tracing(name="unit", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=child_trace_evaluator)
+    def unit(x):
+        return x
 
-        with logging.DominoRun() as run, caplog.at_level(logger.WARNING):
-                parent(1)
+    with logging.DominoRun() as run, caplog.at_level(logger.WARNING):
+        parent(1)
 
-        parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+
+    evals = {r.name: r.value for r in parent_t.evaluation_results}
+    assert evals.get('trace_exists_parent') == 'True'
+    assert 'trace_exists_child' not in evals
+    assert evals.get('span_exists_parent') == 'True'
+    assert evals.get('span_exists_child') == 'True'
+    assert "A trace_evaluator child_trace_evaluator was provided, but the trace could not be found" in caplog.text
 
-        evals = {r.name: r.value for r in parent_t.evaluation_results}
-        assert evals.get('trace_exists_parent') == 'True'
-        assert 'trace_exists_child' not in evals
-        assert evals.get('span_exists_parent') == 'True'
-        assert evals.get('span_exists_child') == 'True'
-        assert "A trace_evaluator child_trace_evaluator was provided, but the trace could not be found" in caplog.text
 
 def test_add_tracing_invalid_label(setup_mlflow_tracking_server, tracing):
-        with pytest.raises(InvalidEvaluationLabelException):
-                @tracing.add_tracing(name="*")
-                def unit(x):
-                        return x
+    with pytest.raises(InvalidEvaluationLabelException):
+        @tracing.add_tracing(name="*")
+        def unit(x):
+            return x
+
 
 def test_add_tracing_dev_no_evaluator(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        add_tracing will create a new trace not add evaluations
-        """
-        exp = mlflow.set_experiment("test_add_tracing_dev_no_evaluator")
+    """
+    add_tracing will create a new trace not add evaluations
+    """
+    exp = mlflow.set_experiment("test_add_tracing_dev_no_evaluator")
 
-        @tracing.add_tracing(name="add_numbers")
-        def add_numbers(x, y):
-                return x + y
+    @tracing.add_tracing(name="add_numbers")
+    def add_numbers(x, y):
+        return x + y
 
-        with logging.DominoRun("test_add_tracing_dev_no_evaluator"):
-                add_numbers(1, 1)
+    with logging.DominoRun("test_add_tracing_dev_no_evaluator"):
+        add_numbers(1, 1)
 
-        # assert tags
-        ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
-        tags = ts[0].info.tags
+    # assert tags
+    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    tags = ts[0].info.tags
+
+    assert 'domino.internal.is_eval' not in tags
 
-        assert 'domino.internal.is_eval' not in tags
 
 def test_add_tracing_decorator_preserves_function_info(setup_mlflow_tracking_server, tracing):
-        def func_with_args(a: int, b: int, c: int=10, *args, **kwargs):
-                """Function with various parameter types."""
-                return a + b + c
-
-        @tracing.add_tracing(name="decorated_func")
-        def decorated_func(a: int, b: int, c: int=10, *args, **kwargs):
-                """returns the input value"""
-                return a + b + c
-
-        original_sig = inspect.signature(func_with_args)
-        decorated_sig = inspect.signature(decorated_func)
-
-        assert decorated_func.__name__ == "decorated_func", "the function name should be preserved by the decorator"
-        assert decorated_func.__doc__ == "returns the input value", "the function docstring should be preserved by the decorator"
-        assert decorated_func.__module__ == "tests.integration.agents.test_tracing"
-        assert decorated_sig == original_sig
-        assert list(decorated_sig.parameters.keys()) == ['a', 'b', 'c', 'args', 'kwargs']
-        assert decorated_sig.parameters['c'].default == 10
-        assert decorated_sig.parameters['a'].annotation == int
+    def func_with_args(a: int, b: int, c: int = 10, *args, **kwargs):
+        """Function with various parameter types."""
+        return a + b + c
+
+    @tracing.add_tracing(name="decorated_func")
+    def decorated_func(a: int, b: int, c: int = 10, *args, **kwargs):
+        """returns the input value"""
+        return a + b + c
+
+    original_sig = inspect.signature(func_with_args)
+    decorated_sig = inspect.signature(decorated_func)
+
+    assert decorated_func.__name__ == "decorated_func", "the function name should be preserved by the decorator"
+    assert decorated_func.__doc__ == "returns the input value", "the function docstring should be preserved by the decorator"
+    assert decorated_func.__module__ == "tests.integration.agents.test_tracing"
+    assert decorated_sig == original_sig
+    assert list(decorated_sig.parameters.keys()) == ['a', 'b', 'c', 'args', 'kwargs']
+    assert decorated_sig.parameters['c'].default == 10
+    assert decorated_sig.parameters['a'].annotation == int
+
 
 def test_add_tracing_preseves_self_and_cls(setup_mlflow_tracking_server, tracing):
-        """
-        add_tracing should preserve self and cls for functionality of the decorated method
-        """
-        class MyClass:
-                class_value = 2
-                def __init__(self):
-                        self.value = 1
+    """
+    add_tracing should preserve self and cls for functionality of the decorated method
+    """
+    class MyClass:
+        class_value = 2
+
+        def __init__(self):
+            self.value = 1
+
+        @tracing.add_tracing(name="instance_method")
+        def instance_method(self, x):
+            return self.value + x
 
-                @tracing.add_tracing(name="instance_method")
-                def instance_method(self, x):
-                        return self.value + x
+        @classmethod
+        @tracing.add_tracing(name="class_method")
+        def class_method(cls, x):
+            return cls.class_value + x
 
-                @classmethod
-                @tracing.add_tracing(name="class_method")
-                def class_method(cls, x):
-                        return cls.class_value + x
+    obj = MyClass()
 
-        obj = MyClass()
+    assert obj.instance_method(1) == 2
+    assert MyClass.class_method(1) == 3
 
-        assert obj.instance_method(1) == 2
-        assert MyClass.class_method(1) == 3
 
 def test_add_tracing_arguments_passed_to_span(setup_mlflow_tracking_server, tracing, mlflow):
-        """
-        add_tracing should preserve self and cls for functionality of the decorated method,
-        but should not pass them as inputs to the trace.
+    """
+    add_tracing should preserve self and cls for functionality of the decorated method,
+    but should not pass them as inputs to the trace.
 
-        it should pass args and kwargs as inputs, also
-        it should pass default values
-        """
-        exp = mlflow.set_experiment("test_add_tracing_arguments_passed_to_span")
-        experiment_id = exp.experiment_id
+    it should pass args and kwargs as inputs, also
+    it should pass default values
+    """
+    exp = mlflow.set_experiment("test_add_tracing_arguments_passed_to_span")
+    experiment_id = exp.experiment_id
 
-        class MyClass:
-                @tracing.add_tracing(name="instance_method")
-                def instance_method(self, x):
-                        return x
+    class MyClass:
+        @tracing.add_tracing(name="instance_method")
+        def instance_method(self, x):
+            return x
 
-                @classmethod
-                @tracing.add_tracing(name="class_method")
-                def class_method(cls, x):
-                        return x
+        @classmethod
+        @tracing.add_tracing(name="class_method")
+        def class_method(cls, x):
+            return x
 
-        @tracing.add_tracing(name="args_kwargs")
-        def args_kwargs(*args, **kwargs):
-                return (args, kwargs)
+    @tracing.add_tracing(name="args_kwargs")
+    def args_kwargs(*args, **kwargs):
+        return (args, kwargs)
 
-        @tracing.add_tracing(name="fun_with_defaults")
-        def fun_with_defaults(x=10):
-                return x
+    @tracing.add_tracing(name="fun_with_defaults")
+    def fun_with_defaults(x=10):
+        return x
 
-        obj = MyClass()
-        obj.instance_method(1)
-        MyClass.class_method(1)
-        args_kwargs(1, y=2)
-        fun_with_defaults()
+    obj = MyClass()
+    obj.instance_method(1)
+    MyClass.class_method(1)
+    args_kwargs(1, y=2)
+    fun_with_defaults()
 
+    instance_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'instance_method'")[0]
+    class_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'class_method'")[0]
+    args_kwargs_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'args_kwargs'")[0]
+    fun_with_defaults_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'fun_with_defaults'")[0]
 
-        instance_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'instance_method'")[0]
-        class_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'class_method'")[0]
-        args_kwargs_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'args_kwargs'")[0]
-        fun_with_defaults_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'fun_with_defaults'")[0]
+    def get_inputs(trace):
+        return trace.data.spans[0].inputs
 
-        def get_inputs(trace):
-                return trace.data.spans[0].inputs
+    it_inputs = get_inputs(instance_trace)
+    assert it_inputs == {'x': 1}
 
-        it_inputs = get_inputs(instance_trace)
-        assert it_inputs == {'x': 1}
+    ct_inputs = get_inputs(class_trace)
+    assert ct_inputs == {'x': 1}
 
-        ct_inputs = get_inputs(class_trace)
-        assert ct_inputs == {'x': 1}
+    ak_inputs = get_inputs(args_kwargs_trace)
+    assert ak_inputs == {'args': [1], 'kwargs': {'y': 2}}
 
-        ak_inputs = get_inputs(args_kwargs_trace)
-        assert ak_inputs == {'args': [1], 'kwargs': {'y': 2}}
+    d_inputs = get_inputs(fun_with_defaults_trace)
+    assert d_inputs == {'x': 10}
 
-        d_inputs = get_inputs(fun_with_defaults_trace)
-        assert d_inputs == {'x': 10}
 
 def test_add_tracing_failed_inline_evaluator_logs_warning(setup_mlflow_tracking_server, tracing, mlflow, caplog):
-        """
-        if the inline evaluator fails, a warning is logged and the main code still executes
-        """
-        mlflow.set_experiment("test_add_tracing_failed_inline_evaluator_logs_warning")
+    """
+    if the inline evaluator fails, a warning is logged and the main code still executes
+    """
+    mlflow.set_experiment("test_add_tracing_failed_inline_evaluator_logs_warning")
 
-        def failing_trace_evaluator(t):
-                return 1/0
+    def failing_trace_evaluator(t):
+        return 1/0
 
-        def failing_evaluator(span):
-                return 1/0
+    def failing_evaluator(span):
+        return 1/0
 
-        @tracing.add_tracing(name="unit", evaluator=failing_evaluator, trace_evaluator=failing_trace_evaluator)
-        def unit(x):
-                return x
+    @tracing.add_tracing(name="unit", evaluator=failing_evaluator, trace_evaluator=failing_trace_evaluator)
+    def unit(x):
+        return x
+
+    with mlflow.start_run(), caplog.at_level(logger.ERROR):
+        assert unit(1) == 1
+        print(caplog.text)
+        assert "Inline evaluation failed for evaluator, failing_evaluator" in caplog.text
+        assert "Inline evaluation failed for trace_evaluator, failing_trace_evaluator" in caplog.text
 
-        with mlflow.start_run(), caplog.at_level(logger.ERROR):
-                assert unit(1) == 1
-                print(caplog.text)
-                assert "Inline evaluation failed for evaluator, failing_evaluator" in caplog.text
-                assert "Inline evaluation failed for trace_evaluator, failing_trace_evaluator" in caplog.text
 
 def test_add_tracing_works_with_generator(setup_mlflow_tracking_server, tracing, mlflow):
-        """
-        add_tracing should not record all result from a generator if not specified
-        if we don't eagerly load the reults onto one trace, we save a span for each yield
-        """
-        exp = mlflow.set_experiment("test_add_tracing_works_with_generator")
-        experiment_id = exp.experiment_id
-
-        @tracing.add_tracing(name="gen", evaluator=lambda span: { 'result': 1 }, eagerly_evaluate_streamed_results=False)
-        def gen():
-                for i in range(3):
-                        yield i
-
-        xs = [x for x in gen()]
-        assert xs == [0, 1, 2], "Results should be unaffected by tracing"
-
-        gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen'")[0]
-        assert len(gen_trace.data.spans) == 4, "should have 4 spans, one for function call, and one for each yield"
-        assert [s.outputs for s in gen_trace.data.spans[1:]] == [0, 1, 2], "yields spans should have correct outputs"
-        assert ["group_id" in s.attributes for s in gen_trace.data.spans[1:]] == [True, True, True], "yields spans should have a group_id attribute"
-        assert [s.attributes["index"] for s in gen_trace.data.spans[1:]] == [0, 1, 2]
-        assert len(set([s.attributes["group_id"] for s in gen_trace.data.spans[1:]])) == 1, "group_id should be the same for all yields"
-
-        # assert evaluation didn't happen inline
-        tags = gen_trace.info.tags
-        assert 'domino.prog.metric.result' not in tags
-        assert 'domino.internal.is_eval' not in tags
+    """
+    add_tracing should not record all results from a generator if not specified
+    if we don't eagerly load the results onto one trace, we save a span for each yield
+    """
+    exp = mlflow.set_experiment("test_add_tracing_works_with_generator")
+    experiment_id = exp.experiment_id
+
+    @tracing.add_tracing(name="gen", evaluator=lambda span: {'result': 1}, eagerly_evaluate_streamed_results=False)
+    def gen():
+        for i in range(3):
+            yield i
+
+    xs = [x for x in gen()]
+    assert xs == [0, 1, 2], "Results should be unaffected by tracing"
+
+    gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen'")[0]
+    assert len(gen_trace.data.spans) == 4, "should have 4 spans, one for function call, and one for each yield"
+    assert [s.outputs for s in gen_trace.data.spans[1:]] == [0, 1, 2], "yields spans should have correct outputs"
+    assert ["group_id" in s.attributes for s in gen_trace.data.spans[1:]] == [True, True, True], "yields spans should have a group_id attribute"
+    assert [s.attributes["index"] for s in gen_trace.data.spans[1:]] == [0, 1, 2]
+    assert len(set([s.attributes["group_id"] for s in gen_trace.data.spans[1:]])) == 1, "group_id should be the same for all yields"
+
+    # assert evaluation didn't happen inline
+    tags = gen_trace.info.tags
+    assert 'domino.prog.metric.result' not in tags
+    assert 'domino.internal.is_eval' not in tags
+
 
 def test_add_tracing_generator_trace_in_evaluator(setup_mlflow_tracking_server, tracing, mlflow, logging):
-        """
-        When using a generator, the trace should be accessible in the parent generator function's evaluator,
-        but not the child span's evaluator
-        """
-        exp = mlflow.set_experiment("test_add_tracing_generator_trace_in_evaluator")
-        experiment_id = exp.experiment_id
+    """
+    When using a generator, the trace should be accessible in the parent generator function's evaluator,
+    but not the child span's evaluator
+    """
+    exp = mlflow.set_experiment("test_add_tracing_generator_trace_in_evaluator")
+    experiment_id = exp.experiment_id
 
-        @tracing.add_tracing(name="parent", evaluator=lambda span: { 'span_exists_parent': 'True' }, trace_evaluator=lambda trace: { 'trace_exists_parent': 'True' })
-        def parent():
-                yield from child(1)
+    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    def parent():
+        yield from child(1)
 
-        @tracing.add_tracing(name="child", evaluator=lambda span: { 'span_exists_child': 'True' }, trace_evaluator=lambda trace: { 'trace_exists_child': 'True' })
-        def child(x):
-                yield x
+    @tracing.add_tracing(name="child", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=lambda trace: {'trace_exists_child': 'True'})
+    def child(x):
+        yield x
 
+    with logging.DominoRun() as run:
+        [_ for _ in parent()]
 
-        with logging.DominoRun() as run:
-                [_ for _ in parent()]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    evals = {r.name: r.value for r in parent_t.evaluation_results}
+    assert evals.get('trace_exists_parent') == 'True'
+    assert 'trace_exists_child' not in evals
+    assert evals.get('span_exists_parent') == 'True'
+    assert evals.get('span_exists_child') == 'True'
 
-        parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
-        evals = {r.name: r.value for r in parent_t.evaluation_results}
-        assert evals.get('trace_exists_parent') == 'True'
-        assert 'trace_exists_child' not in evals
-        assert evals.get('span_exists_parent') == 'True'
-        assert evals.get('span_exists_child') == 'True'
 
 def test_add_tracing_works_with_eagerly_evaluated_generator(setup_mlflow_tracking_server, tracing, mlflow):
-        """
-        add_tracing should record the result from a generator and evaluate it inline
-        """
-        exp = mlflow.set_experiment("test_add_tracing_works_with_eagerly_evaluated_generator")
-        experiment_id = exp.experiment_id
+    """
+    add_tracing should record the result from a generator and evaluate it inline
+    """
+    exp = mlflow.set_experiment("test_add_tracing_works_with_eagerly_evaluated_generator")
+    experiment_id = exp.experiment_id
 
-        @tracing.add_tracing(name="gen_record_all", evaluator=lambda span: { 'result': 1 })
-        def gen_record_all():
-                for i in range(3):
-                        yield i
+    @tracing.add_tracing(name="gen_record_all", evaluator=lambda span: {'result': 1})
+    def gen_record_all():
+        for i in range(3):
+            yield i
 
-        xs = [x for x in gen_record_all()]
-        assert xs == [0, 1, 2]
+    xs = [x for x in gen_record_all()]
+    assert xs == [0, 1, 2]
 
-        gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen_record_all'")[0]
-        span = gen_trace.data.spans[0]
-        tags = gen_trace.info.tags
+    gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen_record_all'")[0]
+    span = gen_trace.data.spans[0]
+    tags = gen_trace.info.tags
+
+    assert len(gen_trace.data.spans) == 1
+    assert span.outputs == [0, 1, 2]
+    assert tags['domino.prog.metric.result'] == '1'
+    assert tags['domino.internal.is_eval'] == 'true'
 
-        assert len(gen_trace.data.spans) == 1
-        assert span.outputs == [0, 1, 2]
-        assert tags['domino.prog.metric.result'] == '1'
-        assert tags['domino.internal.is_eval'] == 'true'
 
 @pytest.mark.asyncio
 async def test_add_tracing_works_with_async(setup_mlflow_tracking_server, mlflow, tracing):
-        exp = mlflow.set_experiment("test_add_tracing_works_with_async")
+    exp = mlflow.set_experiment("test_add_tracing_works_with_async")
+
+    @tracing.add_tracing(name="async_function", evaluator=lambda span: {'result': 1})
+    async def async_function(x):
+        return x
 
-        @tracing.add_tracing(name="async_function", evaluator=lambda span: { 'result': 1 })
-        async def async_function(x):
-                return x
+    res = await async_function(1)
+    assert res == 1
 
-        res = await async_function(1)
-        assert res == 1
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
 
-        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    assert [t.data.spans[0].inputs for t in traces] == [{'x': 1}], "Inputs to trace should be the function arguments"
+    assert [t.data.spans[0].outputs for t in traces] == [1], "Outputs to trace should be the function return value"
 
-        assert [t.data.spans[0].inputs for t in traces] == [{'x':1}], "Inputs to trace should be the function arguments"
-        assert [t.data.spans[0].outputs for t in traces] == [1], "Outputs to trace should be the function return value"
 
 @pytest.mark.asyncio
 async def test_add_tracing_async_trace_in_evaluator(setup_mlflow_tracking_server, mlflow, tracing, logging):
-        """
-        When using async functions, the trace should be accessible in the parent function's evaluator
-        but not the child function's evaluator
-        """
-        exp = mlflow.set_experiment("test_add_tracing_async_trace_in_evaluator")
-
-        @tracing.add_tracing(name="parent", evaluator=lambda span: { 'span_exists_parent': 'True' }, trace_evaluator=lambda trace: { 'trace_exists_parent': 'True' })
-        async def parent(x):
-                return await child(x)
-
-        @tracing.add_tracing(name="child", evaluator=lambda span: { 'span_exists_child': 'True' }, trace_evaluator=lambda trace: { 'trace_exists_child': 'True'})
-        async def child(x):
-                return x
-
-        with logging.DominoRun() as run:
-                await parent(1)
-
-        parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
-        parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
-        evals = {r.name: r.value for r in parent_t.evaluation_results}
-        assert evals.get('trace_exists_parent') == 'True'
-        assert 'trace_exists_child' not in evals
-        assert evals.get('span_exists_parent') == 'True'
-        assert evals.get('span_exists_child') == 'True'
+    """
+    When using async functions, the trace should be accessible in the parent function's evaluator
+    but not the child function's evaluator
+    """
+    exp = mlflow.set_experiment("test_add_tracing_async_trace_in_evaluator")
+
+    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    async def parent(x):
+        return await child(x)
+
+    @tracing.add_tracing(name="child", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=lambda trace: {'trace_exists_child': 'True'})
+    async def child(x):
+        return x
+
+    with logging.DominoRun() as run:
+        await parent(1)
+
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    evals = {r.name: r.value for r in parent_t.evaluation_results}
+    assert evals.get('trace_exists_parent') == 'True'
+    assert 'trace_exists_child' not in evals
+    assert evals.get('span_exists_parent') == 'True'
+    assert evals.get('span_exists_child') == 'True'
+
 
 def test_search_traces(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        @tracing.add_tracing(name="unit")
-        def unit(x):
-                return x
-
-        @tracing.add_tracing(name="parent", evaluator=lambda span: {'mymetric': 1, 'mylabel': 'category'})
-        def parent(x, y):
-                return unit(x) + unit(y)
-
-        @tracing.add_tracing(name="parent2")
-        def parent2(x):
-                return x
-
-        mlflow.set_experiment("test_search_traces")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                parent(1, 2)
-                parent2(1)
-
-        res = tracing.search_traces(run_id=run_id)
-        span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
-
-        assert sorted([trace.name for trace in res.data]) == sorted(["parent", "parent2"])
-        assert sorted([(t.name, t.value) for trace in res.data for t in trace.evaluation_results if trace.name == "parent"]) \
-                == sorted([("mylabel", "category"), ("mymetric", 1.0)])
-        assert len(span_data) == 4
-        assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x':1, 'y': 2}, 3), \
-                ("parent2", {'x':1}, 1), ("unit_1", {'x':1}, 1), ("unit_2", {'x':2}, 2)
-        ], key=lambda x: x[0])
+    @tracing.add_tracing(name="unit")
+    def unit(x):
+        return x
 
-def test_search_traces_time_filter_warning(setup_mlflow_tracking_server, tracing, mlflow, logging, caplog):
-        """
-        if start time is > end time, warn the user
-        """
-        mlflow.set_experiment("test_search_traces_time_filter_warning")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-
-        with caplog.at_level(logger.WARNING):
-                tracing.search_traces(run_id=run_id, start_time=datetime.now(), end_time=datetime.now() - timedelta(seconds=10))
-                assert f"start_time must be before end_time" in caplog.text
+    @tracing.add_tracing(name="parent", evaluator=lambda span: {'mymetric': 1, 'mylabel': 'category'})
+    def parent(x, y):
+        return unit(x) + unit(y)
 
-def test_search_traces_by_trace_name(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        @tracing.add_tracing(name="unit")
-        def unit(x):
-                return x
+    @tracing.add_tracing(name="parent2")
+    def parent2(x):
+        return x
 
-        @tracing.add_tracing(name="parent")
-        def parent(x, y):
-                return unit(x) + unit(y)
+    mlflow.set_experiment("test_search_traces")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        parent(1, 2)
+        parent2(1)
 
-        @tracing.add_tracing(name="parent2")
-        def parent2(x):
-                return x
+    res = tracing.search_traces(run_id=run_id)
+    span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
 
-        mlflow.set_experiment("test_search_traces_by_trace_name")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                parent(1, 2)
-                parent2(1)
+    assert sorted([trace.name for trace in res.data]) == sorted(["parent", "parent2"])
+    assert sorted([(t.name, t.value) for trace in res.data for t in trace.evaluation_results if trace.name == "parent"]) \
+        == sorted([("mylabel", "category"), ("mymetric", 1.0)])
+    assert len(span_data) == 4
+    assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x': 1, 'y': 2}, 3),
+                                                            ("parent2", {'x': 1}, 1), ("unit_1", {'x': 1}, 1), ("unit_2", {'x': 2}, 2)
+                                                            ], key=lambda x: x[0])
 
-        res = tracing.search_traces(run_id=run_id, trace_name="parent")
-        span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
 
-        assert [trace.name for trace in res.data] == ["parent"]
-        assert len(span_data) == 3
-        assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x':1, 'y': 2}, 3), \
-                ("unit_1", {'x':1}, 1), ("unit_2", {'x':2}, 2)], key=lambda x: x[0])
+def test_search_traces_time_filter_warning(setup_mlflow_tracking_server, tracing, mlflow, logging, caplog):
+    """
+    if start time is > end time, warn the user
+    """
+    mlflow.set_experiment("test_search_traces_time_filter_warning")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
 
-def test_search_traces_by_timestamp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        @tracing.add_tracing(name="parent")
-        def parent(x):
-                return x
+    with caplog.at_level(logger.WARNING):
+        tracing.search_traces(run_id=run_id, start_time=datetime.now(), end_time=datetime.now() - timedelta(seconds=10))
+        assert "start_time must be before end_time" in caplog.text
 
-        mlflow.set_experiment("test_search_traces_by_timestamp")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                parent(1)
 
-                time.sleep(2)
+def test_search_traces_by_trace_name(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    @tracing.add_tracing(name="unit")
+    def unit(x):
+        return x
 
-                parent(2)
+    @tracing.add_tracing(name="parent")
+    def parent(x, y):
+        return unit(x) + unit(y)
 
-                time.sleep(2)
+    @tracing.add_tracing(name="parent2")
+    def parent2(x):
+        return x
 
-                parent(3)
+    mlflow.set_experiment("test_search_traces_by_trace_name")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        parent(1, 2)
+        parent2(1)
 
-        start_time = datetime.now() - timedelta(seconds=4)
-        end_time = datetime.now() - timedelta(seconds=2)
+    res = tracing.search_traces(run_id=run_id, trace_name="parent")
+    span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
 
-        res = tracing.search_traces(
-                run_id=run_id,
-                trace_name="parent",
-                start_time=start_time,
-                end_time=end_time
-        )
+    assert [trace.name for trace in res.data] == ["parent"]
+    assert len(span_data) == 3
+    assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x': 1, 'y': 2}, 3),
+                                                            ("unit_1", {'x': 1}, 1), ("unit_2", {'x': 2}, 2)], key=lambda x: x[0])
 
-        assert [trace.name for trace in res.data] == ["parent"]
-        assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res.data] == [[("parent", 2, 2)]]
 
-def test_search_traces_with_traces_made_2hrs_ago(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        exp = mlflow.set_experiment("test_search_traces_with_traces_made_2hrs_ago")
-
-        def parent(x):
-                dt = datetime.now() - timedelta(hours=2)
-                ns = int(dt.timestamp() * 1e9)
-                span = mlflow.start_span_no_context(name="parent",  inputs=1, experiment_id=exp.experiment_id, start_time_ns=ns)
-                span.end()
-                return x
-
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                parent(1)
-
-        res = tracing.search_traces(
-                run_id=run_id,
-                trace_name="parent",
-        )
+def test_search_traces_by_timestamp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    @tracing.add_tracing(name="parent")
+    def parent(x):
+        return x
 
-        assert [trace.name for trace in res.data] == ["parent"]
+    mlflow.set_experiment("test_search_traces_by_timestamp")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        parent(1)
 
-        # If i shorten the time filter, I get no results
-        recent_res = tracing.search_traces(
-                run_id=run_id,
-                trace_name="parent",
-                start_time=datetime.now() - timedelta(hours=1),
-        )
-        assert recent_res.data == []
+        time.sleep(2)
 
-def test_search_traces_multiple_runs_in_exp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        exp = mlflow.set_experiment("test_search_traces_multiple_runs_in_exp")
+        parent(2)
 
-        @tracing.add_tracing(name="unit1")
-        def unit1(x):
-                return x
+        time.sleep(2)
 
-        @tracing.add_tracing(name="unit2")
-        def unit2(x):
-                return x
+        parent(3)
 
-        run_1_id = None
-        with logging.DominoRun() as run:
-                run_1_id = run.info.run_id
-                unit1(1)
+    start_time = datetime.now() - timedelta(seconds=4)
+    end_time = datetime.now() - timedelta(seconds=2)
 
-        with logging.DominoRun() as run:
-                unit2(1)
+    res = tracing.search_traces(
+            run_id=run_id,
+            trace_name="parent",
+            start_time=start_time,
+            end_time=end_time
+    )
 
-        res = tracing.search_traces(run_id=run_1_id)
+    assert [trace.name for trace in res.data] == ["parent"]
+    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res.data] == [[("parent", 2, 2)]]
 
-        assert [trace.name for trace in res.data] == ["unit1"]
 
-def test_search_traces_agent(setup_mlflow_tracking_server_no_env_var_mock, mlflow, tracing):
-        """
-        Can filter by agent id alone or id and version
-        """
-        app_id = "test_search_traces_agent_id"
-        app_version_1 = "test_search_traces_agent_version_1"
-        app_version_2 = "test_search_traces_agent_version_2"
+def test_search_traces_with_traces_made_2hrs_ago(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    exp = mlflow.set_experiment("test_search_traces_with_traces_made_2hrs_ago")
 
-        fixture_create_prod_traces(app_id, app_version_1, "one", tracing)
-        fixture_create_prod_traces(app_id, app_version_2, "two", tracing)
+    def parent(x):
+        dt = datetime.now() - timedelta(hours=2)
+        ns = int(dt.timestamp() * 1e9)
+        span = mlflow.start_span_no_context(name="parent", inputs=1, experiment_id=exp.experiment_id, start_time_ns=ns)
+        span.end()
+        return x
 
-        def get_trace_names(traces):
-                return sorted([trace.name for trace in traces.data])
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        parent(1)
 
-        all_traces = tracing.search_agent_traces(agent_id=app_id)
-        assert get_trace_names(all_traces) == ["one", "two"], "Can get traces for all agent versions"
+    res = tracing.search_traces(
+            run_id=run_id,
+            trace_name="parent",
+    )
 
-        v1_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_1)
-        assert get_trace_names(v1_traces) == ["one"], "Can get traces for just agent version 1"
+    assert [trace.name for trace in res.data] == ["parent"]
 
-        v2_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_2)
-        assert get_trace_names(v2_traces) == ["two"], "Can get traces for just agent version 2"
+    # If I shorten the time filter, I get no results
+    recent_res = tracing.search_traces(
+            run_id=run_id,
+            trace_name="parent",
+            start_time=datetime.now() - timedelta(hours=1),
+    )
+    assert recent_res.data == []
 
-def test_search_traces_agent_agent_id_required(setup_mlflow_tracking_server_no_env_var_mock):
-        """
-        agent id is required if version supplied
-        """
 
-        with pytest.raises(Exception) as e_info:
-                _search_traces(agent_version="fakeversion")
+def test_search_traces_multiple_runs_in_exp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    exp = mlflow.set_experiment("test_search_traces_multiple_runs_in_exp")
 
-        assert "agent_id must also be provided" in str(e_info), "Should raise if version provided without id"
+    @tracing.add_tracing(name="unit1")
+    def unit1(x):
+        return x
 
-def test_search_traces_no_run_agent_ids_supplied(setup_mlflow_tracking_server_no_env_var_mock, tracing):
-        """
-        should throw if no run id, agent version, or id supplied
-        """
+    @tracing.add_tracing(name="unit2")
+    def unit2(x):
+        return x
 
-        with pytest.raises(Exception) as e_info:
-                _search_traces()
+    run_1_id = None
+    with logging.DominoRun() as run:
+        run_1_id = run.info.run_id
+        unit1(1)
 
-        assert "Either run_id or agent_id and agent_version must be provided to search traces" in str(e_info), \
-                "Should raise no agent info or run info provided"
+    with logging.DominoRun() as run:
+        unit2(1)
 
-def test_search_traces_filters_should_work_together_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        """
-        When every filter is specified as well as pagination, the expected results should be returned
-        The test creates multiple differently named traces over the course of a few hours in an experiment
-        with multiple runs
-        """
-        exp = mlflow.set_experiment("test_search_traces_filters_should_work_together_dev")
+    res = tracing.search_traces(run_id=run_1_id)
 
-        @tracing.add_tracing(name="unit1")
-        def unit1(x):
-                return x
+    assert [trace.name for trace in res.data] == ["unit1"]
 
-        def create_span_at_time(name: str, inputs: int, hours_ago: int):
-                dt = datetime.now() - timedelta(hours=hours_ago)
-                ns = int(dt.timestamp() * 1e9)
-                span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=exp.experiment_id, start_time_ns=ns)
-                span.end()
 
-        @tracing.add_tracing(name="sum1")
-        def sum1(x, y):
-                return x + y
+def test_search_traces_agent(setup_mlflow_tracking_server_no_env_var_mock, mlflow, tracing):
+    """
+    Can filter by agent id alone or id and version
+    """
+    app_id = "test_search_traces_agent_id"
+    app_version_1 = "test_search_traces_agent_version_1"
+    app_version_2 = "test_search_traces_agent_version_2"
 
-        @tracing.add_tracing(name="unit2")
-        def unit2(x):
-                return x
+    fixture_create_prod_traces(app_id, app_version_1, "one", tracing)
+    fixture_create_prod_traces(app_id, app_version_2, "two", tracing)
 
-        run_1_id = None
-        with logging.DominoRun() as run:
-                run_1_id = run.info.run_id
+    def get_trace_names(traces):
+        return sorted([trace.name for trace in traces.data])
 
-                create_span_at_time(name="sum1", inputs=1, hours_ago=5)
+    all_traces = tracing.search_agent_traces(agent_id=app_id)
+    assert get_trace_names(all_traces) == ["one", "two"], "Can get traces for all agent versions"
 
-                # search_traces should return the following two spans
-                create_span_at_time(name="sum1", inputs=2, hours_ago=3)
-                create_span_at_time(name="sum1", inputs=3, hours_ago=2)
+    v1_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_1)
+    assert get_trace_names(v1_traces) == ["one"], "Can get traces for just agent version 1"
 
-                unit1(1)
+    v2_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_2)
+    assert get_trace_names(v2_traces) == ["two"], "Can get traces for just agent version 2"
 
-        with logging.DominoRun() as run:
-                unit2(1)
 
-        start_time = datetime.now() - timedelta(hours=4)
-        end_time = datetime.now() - timedelta(hours=1)
+def test_search_traces_agent_agent_id_required(setup_mlflow_tracking_server_no_env_var_mock):
+    """
+    agent id is required if version supplied
+    """
 
-        def get_traces(next_page_token):
-                return tracing.search_traces(
-                        run_id=run_1_id,
-                        trace_name="sum1",
-                        start_time=start_time,
-                        end_time=end_time,
-                        page_token=next_page_token,
-                        max_results=1
-                )
+    with pytest.raises(Exception) as e_info:
+        _search_traces(agent_version="fakeversion")
 
-        def get_span_data(page):
-                return [(trace.name, [s.inputs for s in trace.spans]) for trace in page.data]
+    assert "agent_id must also be provided" in str(e_info), "Should raise if version provided without id"
 
-        # should only return the first sum1 call in the run_1_id domino run
-        page1 = get_traces(None)
-        assert get_span_data(page1) == [("sum1", [2])], "Should return first call"
 
-        # should only return the second sum1 call in the run_1_id domino run
-        page2 = get_traces(page1.page_token)
-        assert get_span_data(page2) == [("sum1", [3])], "Should return second call"
+def test_search_traces_no_run_agent_ids_supplied(setup_mlflow_tracking_server_no_env_var_mock, tracing):
+    """
+    should throw if no run id, agent version, or id supplied
+    """
 
+    with pytest.raises(Exception) as e_info:
+        _search_traces()
 
-def test_search_traces_filters_should_work_together_prod(setup_mlflow_tracking_server_no_env_var_mock, mocker, mlflow, tracing, logging):
-        """
-        When searching by agent ID and version and when every filter is specified as well as pagination,
-        the expected results should be returned
-        The test creates multiple differently named traces over the course of a few hours in an experiment
-        with multiple runs
-        """
-        app_id = "test_search_traces_filters_should_work_together_prod"
-        app_version_1 = f"{app_id}_1"
-        app_version_2 = f"{app_id}_2"
-
-        fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=5)
+    assert "Either run_id or agent_id and agent_version must be provided to search traces" in str(e_info), \
+        "Should raise no agent info or run info provided"
 
-        # search_traces should return the following two spans
-        fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=2)
-        fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=3)
 
-        fixture_create_prod_traces(app_id, app_version_1, "unit1", tracing)
-        fixture_create_prod_traces(app_id, app_version_2, "unit2", tracing)
+def test_search_traces_filters_should_work_together_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    """
+    When every filter is specified as well as pagination, the expected results should be returned
+    The test creates multiple differently named traces over the course of a few hours in an experiment
+    with multiple runs
+    """
+    exp = mlflow.set_experiment("test_search_traces_filters_should_work_together_dev")
 
-        start_time = datetime.now() - timedelta(hours=4)
-        end_time = datetime.now() - timedelta(hours=1)
+    @tracing.add_tracing(name="unit1")
+    def unit1(x):
+        return x
 
-        def get_traces(next_page_token):
-                return tracing.search_agent_traces(
-                        agent_id=app_id,
-                        agent_version=app_version_1,
-                        trace_name="sum1",
-                        start_time=start_time,
-                        end_time=end_time,
-                        page_token=next_page_token,
-                        max_results=1
-                )
+    def create_span_at_time(name: str, inputs: int, hours_ago: int):
+        dt = datetime.now() - timedelta(hours=hours_ago)
+        ns = int(dt.timestamp() * 1e9)
+        span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=exp.experiment_id, start_time_ns=ns)
+        span.end()
 
-        def get_span_data(page):
-                return [(trace.name, [s.inputs for s in trace.spans]) for trace in page.data]
+    @tracing.add_tracing(name="sum1")
+    def sum1(x, y):
+        return x + y
 
-        # should only return the first sum1 call
-        page1 = get_traces(None)
-        assert get_span_data(page1) == [("sum1", [3])], "Should return first call"
+    @tracing.add_tracing(name="unit2")
+    def unit2(x):
+        return x
 
-        # should only return the second sum1 call
-        page2 = get_traces(page1.page_token)
-        assert get_span_data(page2) == [("sum1", [2])], "Should return second call"
+    run_1_id = None
+    with logging.DominoRun() as run:
+        run_1_id = run.info.run_id
 
+        create_span_at_time(name="sum1", inputs=1, hours_ago=5)
 
-def test_search_traces_pagination(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        """
-        The api should provide a page token in if the total number of results is bigger than the max results
-        and you can use that token to get the next page of results
-        """
-        @tracing.add_tracing(name="parent")
-        def parent(x):
-                return x
-
-        mlflow.set_experiment("test_search_traces_by_timestamp")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                parent(1)
-                parent(2)
-
-        res1 = tracing.search_traces(
-                run_id=run_id,
-                max_results=1,
-        )
+        # search_traces should return the following two spans
+        create_span_at_time(name="sum1", inputs=2, hours_ago=3)
+        create_span_at_time(name="sum1", inputs=3, hours_ago=2)
 
-        assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res1.data] == [[("parent", 1, 1)]]
+        unit1(1)
 
-        res2 = tracing.search_traces(
-                run_id=run_id,
-                max_results=1,
-                page_token=res1.page_token
-        )
+    with logging.DominoRun() as run:
+        unit2(1)
 
-        assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res2.data] == [[("parent", 2, 2)]]
+    start_time = datetime.now() - timedelta(hours=4)
+    end_time = datetime.now() - timedelta(hours=1)
 
-def test_search_traces_from_lazy_generator(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-        @tracing.add_tracing(name="parent", eagerly_evaluate_streamed_results=False)
-        def parent():
-                for i in range(3):
-                        yield i
-
-        mlflow.set_experiment("test_search_traces_from_lazy_generator")
-        run_id = None
-        with logging.DominoRun() as run:
-                run_id = run.info.run_id
-                # traces don't emit unless you consume generator
-                [x for x in parent()]
-
-        traces = tracing.search_traces(
-                run_id=run_id,
+    def get_traces(next_page_token):
+        return tracing.search_traces(
+                run_id=run_1_id,
+                trace_name="sum1",
+                start_time=start_time,
+                end_time=end_time,
+                page_token=next_page_token,
+                max_results=1
         )
 
-        assert len(traces.data) == 1
-        assert len(traces.data[0].spans) == 4
+    def get_span_data(page):
+        return [(trace.name, [s.inputs for s in trace.spans]) for trace in page.data]
 
+    # should only return the first sum1 call in the run_1_id domino run
+    page1 = get_traces(None)
+    assert get_span_data(page1) == [("sum1", [2])], "Should return first call"
 
-def test_init_tracing_triggers_one_get_experiment_by_name_calls_in_threads(setup_mlflow_tracking_server, mlflow, tracing):
-        """
-        init_tracing should call mlflow.set_experiment once
-        when invoked concurrently from two threads and traces should go to the
-        right experiment anyway
-        """
-        app_id = "concurrency_app"
-        env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
-        expected_experiment_name = build_agent_experiment_name(app_id)
+    # should only return the second sum1 call in the run_1_id domino run
+    page2 = get_traces(page1.page_token)
+    assert get_span_data(page2) == [("sum1", [3])], "Should return second call"
 
-        reset_prod_tracing()
 
-        with patch.dict(os.environ, env_vars, clear=True):
+def test_search_traces_filters_should_work_together_prod(setup_mlflow_tracking_server_no_env_var_mock, mocker, mlflow, tracing, logging):
+    """
+    When searching by agent ID and version with every filter specified, plus pagination,
+    the expected results should be returned
+    The test creates multiple differently named traces over the course of a few hours in an experiment
+    with multiple runs
+    """
+    app_id = "test_search_traces_filters_should_work_together_prod"
+    app_version_1 = f"{app_id}_1"
+    app_version_2 = f"{app_id}_2"
+
+    fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=5)
+
+    # search_traces should return the following two spans
+    fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=2)
+    fixture_create_prod_traces(app_id, app_version_1, "sum1", tracing, hours_ago=3)
+
+    fixture_create_prod_traces(app_id, app_version_1, "unit1", tracing)
+    fixture_create_prod_traces(app_id, app_version_2, "unit2", tracing)
+
+    start_time = datetime.now() - timedelta(hours=4)
+    end_time = datetime.now() - timedelta(hours=1)
+
+    def get_traces(next_page_token):
+        return tracing.search_agent_traces(
+                agent_id=app_id,
+                agent_version=app_version_1,
+                trace_name="sum1",
+                start_time=start_time,
+                end_time=end_time,
+                page_token=next_page_token,
+                max_results=1
+        )
 
-                def send_traces():
-                        tracing.init_tracing()
+    def get_span_data(page):
+        return [(trace.name, [s.inputs for s in trace.spans]) for trace in page.data]
 
-                        @tracing.add_tracing(name="do")
-                        def do():
-                                return 1
+    # should only return the first sum1 call
+    page1 = get_traces(None)
+    assert get_span_data(page1) == [("sum1", [3])], "Should return first call"
 
-                        do()
+    # should only return the second sum1 call
+    page2 = get_traces(page1.page_token)
+    assert get_span_data(page2) == [("sum1", [2])], "Should return second call"
 
-                # Spy on mlflow.set_experiment to ensure it is called once
-                with patch.object(
-                        mlflow,
-                        "set_experiment",
-                        wraps=mlflow.set_experiment,
-                ) as spy_set_experiment:
-                        t1 = threading.Thread(target=send_traces)
-                        t2 = threading.Thread(target=send_traces)
 
-                        t1.start()
-                        t2.start()
-                        t1.join()
-                        t2.join()
+def test_search_traces_pagination(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    """
+    The API should provide a page token if the total number of results is bigger than the max results
+    and you can use that token to get the next page of results
+    """
+    @tracing.add_tracing(name="parent")
+    def parent(x):
+        return x
 
-                        assert spy_set_experiment.call_count == 1, "set_experiment should be called once from init_tracing"
+    mlflow.set_experiment("test_search_traces_by_timestamp")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        parent(1)
+        parent(2)
 
-                # Verify two traces named "do" were saved to the Agent experiment
-                exp = mlflow.get_experiment_by_name(expected_experiment_name)
-                traces = mlflow.search_traces(
-                        experiment_ids=[exp.experiment_id],
-                        filter_string="trace.name = 'do'",
-                        return_type='list',
-                )
+    res1 = tracing.search_traces(
+            run_id=run_id,
+            max_results=1,
+    )
 
-                # even though we don't re-initialize the experiment in both threads, the traces
-                # still go to the right experiment
-                assert len(traces) == 2, "Two traces named 'do' should be saved to the experiment"
+    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res1.data] == [[("parent", 1, 1)]]
 
-def test_add_tracing_span_type_and_attributes(setup_mlflow_tracking_server, mlflow, tracing):
-        """
-        add_tracing should support span_type and attributes parameters
-        """
-        from mlflow.entities import SpanType
+    res2 = tracing.search_traces(
+            run_id=run_id,
+            max_results=1,
+            page_token=res1.page_token
+    )
 
-        exp = mlflow.set_experiment("test_add_tracing_span_type_and_attributes")
+    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res2.data] == [[("parent", 2, 2)]]
 
-        @tracing.add_tracing(
-            name="llm_call",
-            span_type=SpanType.LLM,
-            attributes={"model": "gpt-4"}
-        )
-        def llm_call(prompt):
-            return f"Response to: {prompt}"
 
-        # Test that function works normally
-        result = llm_call("Hello")
-        assert result == "Response to: Hello"
+def test_search_traces_from_lazy_generator(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+    @tracing.add_tracing(name="parent", eagerly_evaluate_streamed_results=False)
+    def parent():
+        for i in range(3):
+            yield i
 
-        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
-        assert len(traces) == 1, "Should create one trace"
+    mlflow.set_experiment("test_search_traces_from_lazy_generator")
+    run_id = None
+    with logging.DominoRun() as run:
+        run_id = run.info.run_id
+        # traces don't emit unless you consume generator
+        [x for x in parent()]
 
-        span = traces[0].data.spans[0]
-        assert span.span_type == "LLM", "Span type should be set to LLM"
-        assert span.attributes.get("model") == "gpt-4", "Custom attribute 'model' should be set on the span"
+    traces = tracing.search_traces(
+            run_id=run_id,
+    )
 
-def test_add_tracing_span_type_with_async_and_generator(setup_mlflow_tracking_server, mlflow, tracing):
-        """
-        span_type and attributes should work with async and generator functions
-        """
-        import asyncio
-        from mlflow.entities import SpanType
-
-        exp = mlflow.set_experiment("test_add_tracing_span_type_async_generator")
-
-        @tracing.add_tracing(
-            name="async_retriever",
-            span_type=SpanType.RETRIEVER,
-            attributes={"index": "vector_db"}
-        )
-        async def async_retriever(query):
-            return [f"doc_{query}"]
+    assert len(traces.data) == 1
+    assert len(traces.data[0].spans) == 4
 
-        @tracing.add_tracing(
-            name="generator_chain",
-            span_type=SpanType.CHAIN
+
+def test_init_tracing_triggers_one_get_experiment_by_name_calls_in_threads(setup_mlflow_tracking_server, mlflow, tracing):
+    """
+    init_tracing should call mlflow.set_experiment once
+    when invoked concurrently from two threads and traces should go to the
+    right experiment anyway
+    """
+    app_id = "concurrency_app"
+    env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    expected_experiment_name = build_agent_experiment_name(app_id)
+
+    reset_prod_tracing()
+
+    with patch.dict(os.environ, env_vars, clear=True):
+
+        def send_traces():
+            tracing.init_tracing()
+
+            @tracing.add_tracing(name="do")
+            def do():
+                return 1
+
+            do()
+
+        # Spy on mlflow.set_experiment to ensure it is called once
+        with patch.object(
+                mlflow,
+                "set_experiment",
+                wraps=mlflow.set_experiment,
+        ) as spy_set_experiment:
+            t1 = threading.Thread(target=send_traces)
+            t2 = threading.Thread(target=send_traces)
+
+            t1.start()
+            t2.start()
+            t1.join()
+            t2.join()
+
+            assert spy_set_experiment.call_count == 1, "set_experiment should be called once from init_tracing"
+
+        # Verify two traces named "do" were saved to the Agent experiment
+        exp = mlflow.get_experiment_by_name(expected_experiment_name)
+        traces = mlflow.search_traces(
+                experiment_ids=[exp.experiment_id],
+                filter_string="trace.name = 'do'",
+                return_type='list',
         )
-        def generator_chain():
-            for i in range(2):
-                yield f"chunk_{i}"
 
-        # Test functions work correctly
-        result = asyncio.run(async_retriever("test"))
-        assert result == ["doc_test"]
+        # even though we don't re-initialize the experiment in both threads, the traces
+        # still go to the right experiment
+        assert len(traces) == 2, "Two traces named 'do' should be saved to the experiment"
 
-        gen_results = list(generator_chain())
-        assert gen_results == ["chunk_0", "chunk_1"]
 
-        # Test traces were created
-        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
-        assert len(traces) >= 2, "Should create at least two traces"
+def test_add_tracing_span_type_and_attributes(setup_mlflow_tracking_server, mlflow, tracing):
+    """
+    add_tracing should support span_type and attributes parameters
+    """
+    from mlflow.entities import SpanType
 
-        async_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'async_retriever'")[0]
-        async_span = async_trace.data.spans[0]
-        assert async_span.span_type == "RETRIEVER", "Async span type should be RETRIEVER"
-        assert async_span.attributes.get("index") == "vector_db", "Async span attribute 'index' should be set"
+    exp = mlflow.set_experiment("test_add_tracing_span_type_and_attributes")
 
-        gen_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'generator_chain'")[0]
-        gen_span = gen_trace.data.spans[0]
-        assert gen_span.span_type == "CHAIN", "Generator span type should be CHAIN"
+    @tracing.add_tracing(
+        name="llm_call",
+        span_type=SpanType.LLM,
+        attributes={"model": "gpt-4"}
+    )
+    def llm_call(prompt):
+        return f"Response to: {prompt}"
 
-def test_add_tracing_custom_span_type_string(setup_mlflow_tracking_server, mlflow, tracing):
-        """
-        add_tracing should accept custom span type strings
-        """
-        exp = mlflow.set_experiment("test_add_tracing_custom_span_type")
-
-        @tracing.add_tracing(
-            name="custom_operation",
-            span_type="CUSTOM_OPERATION",
-            attributes={"operation_id": "op_123"}
-        )
-        def custom_operation():
-            return "custom result"
+    # Test that function works normally
+    result = llm_call("Hello")
+    assert result == "Response to: Hello"
 
-        # Test function works
-        result = custom_operation()
-        assert result == "custom result"
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    assert len(traces) == 1, "Should create one trace"
 
-        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
-        assert len(traces) == 1, "Should create one trace"
+    span = traces[0].data.spans[0]
+    assert span.span_type == "LLM", "Span type should be set to LLM"
+    assert span.attributes.get("model") == "gpt-4", "Custom attribute 'model' should be set on the span"
 
-        span = traces[0].data.spans[0]
-        assert span.span_type == "CUSTOM_OPERATION", "Custom span type string should be preserved"
-        assert span.attributes.get("operation_id") == "op_123", "Custom attribute 'operation_id' should be set on the span"
+
+def test_add_tracing_span_type_with_async_and_generator(setup_mlflow_tracking_server, mlflow, tracing):
+    """
+    span_type and attributes should work with async and generator functions
+    """
+    from mlflow.entities import SpanType
+
+    exp = mlflow.set_experiment("test_add_tracing_span_type_async_generator")
+
+    @tracing.add_tracing(
+        name="async_retriever",
+        span_type=SpanType.RETRIEVER,
+        attributes={"index": "vector_db"}
+    )
+    async def async_retriever(query):
+        return [f"doc_{query}"]
+
+    @tracing.add_tracing(
+        name="generator_chain",
+        span_type=SpanType.CHAIN
+    )
+    def generator_chain():
+        for i in range(2):
+            yield f"chunk_{i}"
+
+    # Test functions work correctly
+    result = asyncio.run(async_retriever("test"))
+    assert result == ["doc_test"]
+
+    gen_results = list(generator_chain())
+    assert gen_results == ["chunk_0", "chunk_1"]
+
+    # Test traces were created
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    assert len(traces) >= 2, "Should create at least two traces"
+
+    async_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'async_retriever'")[0]
+    async_span = async_trace.data.spans[0]
+    assert async_span.span_type == "RETRIEVER", "Async span type should be RETRIEVER"
+    assert async_span.attributes.get("index") == "vector_db", "Async span attribute 'index' should be set"
+
+    gen_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'generator_chain'")[0]
+    gen_span = gen_trace.data.spans[0]
+    assert gen_span.span_type == "CHAIN", "Generator span type should be CHAIN"
+
+
+def test_add_tracing_custom_span_type_string(setup_mlflow_tracking_server, mlflow, tracing):
+    """
+    add_tracing should accept custom span type strings
+    """
+    exp = mlflow.set_experiment("test_add_tracing_custom_span_type")
+
+    @tracing.add_tracing(
+        name="custom_operation",
+        span_type="CUSTOM_OPERATION",
+        attributes={"operation_id": "op_123"}
+    )
+    def custom_operation():
+        return "custom result"
+
+    # Test function works
+    result = custom_operation()
+    assert result == "custom result"
+
+    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    assert len(traces) == 1, "Should create one trace"
+
+    span = traces[0].data.spans[0]
+    assert span.span_type == "CUSTOM_OPERATION", "Custom span type string should be preserved"
+    assert span.attributes.get("operation_id") == "op_123", "Custom attribute 'operation_id' should be set on the span"
diff --git a/tests/integration/agents/test_util.py b/tests/integration/agents/test_util.py
index fb4543d9..a9ecde09 100644
--- a/tests/integration/agents/test_util.py
+++ b/tests/integration/agents/test_util.py
@@ -1,6 +1,6 @@
 import domino.agents.tracing.inittracing as inittracing
 
-def reset_prod_tracing():
-        inittracing._is_prod_tracing_initialized = False
-        inittracing.triggered_autolog_frameworks = set()
 
+def reset_prod_tracing():
+    inittracing._is_prod_tracing_initialized = False
+    inittracing.triggered_autolog_frameworks = set()
diff --git a/tests/test_basic_auth.py b/tests/test_basic_auth.py
index 895fc3b9..ca932e14 100644
--- a/tests/test_basic_auth.py
+++ b/tests/test_basic_auth.py
@@ -104,6 +104,7 @@ def test_object_creation_with_api_proxy():
     ), "Authentication using API proxy should be of type domino.authentication.ProxyAuth"
     assert d.request_manager.auth.api_proxy == "http://localhost:1234"
 
+
 @pytest.mark.usefixtures("mock_domino_version_response", "clear_token_file_from_env", "mock_proxy_response_https")
 def test_object_creation_with_api_proxy_with_scheme():
     """
diff --git a/tests/test_datasets.py b/tests/test_datasets.py
index c18c2649..6f549999 100644
--- a/tests/test_datasets.py
+++ b/tests/test_datasets.py
@@ -339,7 +339,7 @@ def test_datasets_upload_mixed_slash_path(mock_exists, default_domino_client):
     assert "back\\slash.txt" in os.listdir("tests/assets")
     local_path_to_file = "tests/assets/back\\slash.txt"
     response = default_domino_client.datasets_upload_files(datasets_id,
-                                                            local_path_to_file)
+                                                           local_path_to_file)
     assert "back\\slash.txt" in response
 
 
@@ -355,7 +355,7 @@ def test_datasets_upload_windows_path(mock_exists, default_domino_client):
     assert "test_datasets.py" in os.listdir("tests")
     windows_local_path_to_file = "tests\\test_datasets.py"
     response = default_domino_client.datasets_upload_files(datasets_id,
-                                                            windows_local_path_to_file)
+                                                           windows_local_path_to_file)
     assert "test_datasets.py" in response
 
 
@@ -389,7 +389,7 @@ def test_datasets_upload_directory_windows_path(mock_exists, default_domino_clie
     assert os.path.isdir("tests/assets")
     windows_local_path_to_dir = "tests/assets"
     response = default_domino_client.datasets_upload_files(datasets_id,
-                                                            windows_local_path_to_dir)
+                                                           windows_local_path_to_dir)
     assert "tests/assets" in response
 
 
diff --git a/tests/test_deprecations.py b/tests/test_deprecations.py
index b3acf8b6..4da9cd73 100644
--- a/tests/test_deprecations.py
+++ b/tests/test_deprecations.py
@@ -49,8 +49,8 @@ def test_publishApiEndpoint_warns(self, client):
     def test_new_names_accepted_without_warning(self, client, recwarn):
         client.runs_start("main.py", is_direct=True, commit_id=MOCK_COMMIT_ID)
         deprecation_warnings = [w for w in recwarn.list if issubclass(w.category, DeprecationWarning)
-                                 and "deprecated" in str(w.message).lower()
-                                 and any(x in str(w.message) for x in ["isDirect", "commitId"])]
+                                and "deprecated" in str(w.message).lower()
+                                and any(x in str(w.message) for x in ["isDirect", "commitId"])]
         assert len(deprecation_warnings) == 0
 
 
diff --git a/tests/test_domino.py b/tests/test_domino.py
index da244530..3e3bd069 100644
--- a/tests/test_domino.py
+++ b/tests/test_domino.py
@@ -22,18 +22,19 @@ def test_versioning(requests_mock, dummy_hostname):
     with pytest.raises(Exception):
         dom.requires_at_least("5.11.0")
 
+
 def test_request_session(test_auth_base):
-     request_manager = _HttpRequestManager(auth=test_auth_base)
-     start_time = time.time()
-     try:
-         response = request_manager.request_session.get(
-             'https://localhost:9999' # ConnectionError
-         )
-     except Exception as ex:
-         print('It failed :(', ex.__class__.__name__)
-     else:
-         print('It eventually worked', response.status_code)
-     finally:
-         end_time = time.time()
-         total_time = end_time - start_time
-         assert(total_time > 5) # actual value should be around 6.0210....
+    request_manager = _HttpRequestManager(auth=test_auth_base)
+    start_time = time.time()
+    try:
+        response = request_manager.request_session.get(
+            'https://localhost:9999'  # ConnectionError
+        )
+    except Exception as ex:
+        print('It failed :(', ex.__class__.__name__)
+    else:
+        print('It eventually worked', response.status_code)
+    finally:
+        end_time = time.time()
+        total_time = end_time - start_time
+        assert (total_time > 5)  # actual value should be around 6.0210....
diff --git a/tests/test_spark_operator.py b/tests/test_spark_operator.py
index 6c8769bd..00f14557 100644
--- a/tests/test_spark_operator.py
+++ b/tests/test_spark_operator.py
@@ -9,7 +9,6 @@
 from airflow import DAG
 from airflow.models import TaskInstance
 import pytest
-from pprint import pformat
 
 from domino.airflow import DominoSparkOperator
 from domino.exceptions import RunFailedException
@@ -18,6 +17,7 @@
 TEST_PROJECT = os.environ.get("DOMINO_SPARK_TEST_PROJECT")
 dag_id = "test_spark_operator"
 
+
 @pytest.mark.skipif(os.getenv("SPARK_DEP") != "yes", reason="Extra dependency required")
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
@@ -36,6 +36,7 @@ def test_spark_operator_no_cluster():
     ti = TaskInstance(task=task, execution_date=execution_dt)
     task.execute(ti.get_template_context())
 
+
 @pytest.mark.skipif(os.getenv("SPARK_DEP") != "yes", reason="Extra dependency required")
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
@@ -58,6 +59,7 @@ def test_spark_operator_with_cluster(spark_cluster_env_id):
     ti = TaskInstance(task=task, execution_date=execution_dt)
     task.execute(ti.get_template_context())
 
+
 @pytest.mark.skipif(os.getenv("SPARK_DEP") != "yes", reason="Extra dependency required")
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
@@ -83,6 +85,7 @@ def test_spark_operator_with_compute_cluster_properties(spark_cluster_env_id):
     ti = TaskInstance(task=task, execution_date=execution_dt)
     task.execute(ti.get_template_context())
 
+
 @pytest.mark.skipif(os.getenv("SPARK_DEP") != "yes", reason="Extra dependency required")
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"

From e1a903e97d201e8a308c7c6bf0151185d9afb242 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 17:55:40 +0100
Subject: [PATCH 08/20] update pre-commit hook versions to latest

---
 .pre-commit-config.yaml | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 83169423..fdd87e63 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,7 +8,7 @@ repos:
         files: ^domino/.*\.py$
         pass_filenames: true
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.0.1
+    rev: v5.0.0
     hooks:
       - id: check-case-conflict
       - id: check-merge-conflict
@@ -19,21 +19,20 @@ repos:
       - id: no-commit-to-branch
         args: [-b, master]
       - id: trailing-whitespace
-  - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.2
+  - repo: https://github.com/PyCQA/flake8
+    rev: 7.2.0
     hooks:
       - id: flake8
   - repo: https://github.com/PyCQA/isort
-    rev: 5.9.3
+    rev: 5.13.2
     hooks:
       - id: isort
   - repo: https://github.com/psf/black
-    rev: 22.3.0
+    rev: 25.1.0
     hooks:
       - id: black
-        language_version: python # Should be a command that runs python3.6+
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.910
+    rev: v1.15.0
     hooks:
       - id: mypy
         args:
@@ -43,6 +42,7 @@ repos:
             --explicit-package-bases,
             --ignore-missing-imports,
             --follow-imports=silent,
+            --python-version=3.10,
           ]
         additional_dependencies:
           - "types-pyyaml"
@@ -53,7 +53,6 @@ repos:
           - "types-python-dateutil"
           - "types-redis"
           - "types-protobuf"
-          - "types-python-dateutil"
           - "types-frozendict"
           - "types-typing-extensions"
           - "types-urllib3"

From 321eae5ef5e3325ad49fbb0fbc1b76cadf28a423 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 17:56:28 +0100
Subject: [PATCH 09/20] resolve all flake8, isort, and black errors across
 the codebase

---
 .flake8                                       |    3 +
 docs/source/conf.py                           |   41 +-
 domino/_custom_metrics.py                     |   22 +-
 domino/_impl/custommetrics/__init__.py        |   22 +-
 domino/_impl/custommetrics/api_client.py      |  640 +++++----
 .../_impl/custommetrics/apis/path_to_api.py   |   10 +-
 ...ic_values_v1_model_monitoring_id_metric.py |    4 +-
 domino/_impl/custommetrics/apis/tag_to_api.py |    4 +-
 .../apis/tags/custom_metrics_api.py           |   15 +-
 domino/_impl/custommetrics/configuration.py   |  116 +-
 domino/_impl/custommetrics/exceptions.py      |   26 +-
 .../model/failure_envelope_v1.py              |  117 +-
 .../model/failure_envelope_v1.pyi             |  142 +-
 .../model/invalid_body_envelope_v1.py         |   65 +-
 .../model/invalid_body_envelope_v1.pyi        |   81 +-
 .../_impl/custommetrics/model/metadata_v1.py  |  117 +-
 .../_impl/custommetrics/model/metadata_v1.pyi |  142 +-
 .../model/metric_alert_request_v1.py          |  145 +-
 .../model/metric_alert_request_v1.pyi         |  165 ++-
 .../custommetrics/model/metric_tag_v1.py      |   94 +-
 .../custommetrics/model/metric_tag_v1.pyi     |  110 +-
 .../custommetrics/model/metric_value_v1.py    |  130 +-
 .../custommetrics/model/metric_value_v1.pyi   |  155 +-
 .../model/metric_values_envelope_v1.py        |  116 +-
 .../model/metric_values_envelope_v1.pyi       |  137 +-
 .../model/new_metric_value_v1.py              |  158 ++-
 .../model/new_metric_value_v1.pyi             |  187 ++-
 .../model/new_metric_values_envelope_v1.py    |   85 +-
 .../model/new_metric_values_envelope_v1.pyi   |  107 +-
 .../custommetrics/model/target_range_v1.py    |  113 +-
 .../custommetrics/model/target_range_v1.pyi   |  142 +-
 domino/_impl/custommetrics/models/__init__.py |   16 +-
 domino/_impl/custommetrics/paths/__init__.py  |    4 +-
 .../paths/api_metric_alerts_v1/post.py        |  195 +--
 .../paths/api_metric_alerts_v1/post.pyi       |  221 ++-
 .../paths/api_metric_values_v1/post.py        |  195 +--
 .../paths/api_metric_values_v1/post.pyi       |  221 ++-
 .../get.py                                    |  208 +--
 .../get.pyi                                   |  231 ++-
 domino/_impl/custommetrics/rest.py            |  211 +--
 domino/_impl/custommetrics/schemas.py         | 1242 +++++++++++------
 domino/agents/_eval_tags.py                   |    2 +-
 domino/agents/_verify_domino_support.py       |    8 +-
 domino/agents/logging/__init__.py             |    2 +-
 domino/agents/logging/dominorun.py            |   16 +-
 domino/agents/logging/logging.py              |   10 +-
 domino/agents/read_agent_config.py            |    3 +-
 domino/agents/tracing/__init__.py             |   10 +-
 domino/agents/tracing/inittracing.py          |    6 +-
 domino/agents/tracing/tracing.py              |   46 +-
 domino/airflow/_operator.py                   |    4 +-
 domino/authentication.py                      |   22 +-
 domino/constants.py                           |    1 +
 domino/datasets.py                            |  140 +-
 domino/domino.py                              |  460 +++---
 domino/http_request_manager.py                |    3 +-
 domino/routes.py                              |   86 +-
 examples/example_budget_manager.py            |   22 +-
 examples/models_and_environments.py           |    2 +-
 pyproject.toml                                |    5 +
 scripts/check_snake_case.py                   |    1 +
 setup.py                                      |    2 +-
 .../test_models/test_failure_envelope_v1.py   |   11 +-
 .../test_invalid_body_envelope_v1.py          |   11 +-
 .../test_models/test_metadata_v1.py           |   11 +-
 .../test_metric_alert_request_v1.py           |   11 +-
 .../test_models/test_metric_tag_v1.py         |   11 +-
 .../test_models/test_metric_value_v1.py       |   11 +-
 .../test_metric_values_envelope_v1.py         |   11 +-
 .../test_models/test_new_metric_value_v1.py   |   11 +-
 .../test_new_metric_values_envelope_v1.py     |   11 +-
 .../test_models/test_target_range_v1.py       |   11 +-
 .../custommetrics/test_paths/__init__.py      |   37 +-
 .../test_api_metric_alerts_v1/test_post.py    |   10 +-
 .../test_api_metric_values_v1/test_post.py    |   10 +-
 .../test_get.py                               |   12 +-
 tests/agents/test_agents_eval_tags.py         |    8 +-
 tests/agents/test_read_agent_config.py        |   41 +-
 tests/agents/test_verify_domino_support.py    |  110 +-
 tests/conftest.py                             |    3 +-
 tests/integration/agents/conftest.py          |   61 +-
 tests/integration/agents/mlflow_fixtures.py   |   50 +-
 tests/integration/agents/test_domino_run.py   |  227 +--
 tests/integration/agents/test_logging.py      |   55 +-
 tests/integration/agents/test_tracing.py      |  660 ++++++---
 tests/test_app.py                             |   18 +-
 tests/test_apps.py                            |    9 +-
 tests/test_basic_auth.py                      |   18 +-
 tests/test_collaborators.py                   |    4 +-
 tests/test_custom_metrics.py                  |   29 +-
 tests/test_datasets.py                        |   74 +-
 tests/test_deprecations.py                    |   41 +-
 tests/test_domino.py                          |   15 +-
 tests/test_endpoints.py                       |    8 +-
 tests/test_environments.py                    |    2 +
 tests/test_finops.py                          |  100 +-
 tests/test_helpers.py                         |   15 +-
 tests/test_jobs.py                            |   26 +-
 tests/test_operator.py                        |   10 +-
 tests/test_project_id.py                      |    7 +-
 tests/test_projects.py                        |   13 +-
 tests/test_spark_operator.py                  |    4 +-
 102 files changed, 5693 insertions(+), 3100 deletions(-)
 create mode 100644 pyproject.toml

diff --git a/.flake8 b/.flake8
index 1cfb3043..21df6f28 100644
--- a/.flake8
+++ b/.flake8
@@ -21,3 +21,6 @@ ignore =
     E203,
     # Line lengths are recommended to be no greater than 79 characters
     E501,
+per-file-ignores =
+    # Auto-generated OpenAPI client — forward-reference annotations flagged as undefined
+    domino/_impl/custommetrics/*.py: F821
diff --git a/docs/source/conf.py b/docs/source/conf.py
index c4f684d0..6e387104 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -9,14 +9,14 @@
 # -- General configuration ---------------------------------------------------
 extensions = [
     "sphinx.ext.autodoc",
-    'sphinx.ext.autosummary',
-    'sphinx.ext.napoleon',
+    "sphinx.ext.autosummary",
+    "sphinx.ext.napoleon",
 ]
 
 autodoc_default_options = {
-    'members': True,
-    'undoc-members': False,  # Don't show undocumented members
-    'show-inheritance': False,
+    "members": True,
+    "undoc-members": False,  # Don't show undocumented members
+    "show-inheritance": False,
 }
 
 # If you want Sphinx to evaluate forward refs safely
@@ -37,14 +37,29 @@
 # Mock heavy/optional dependencies to keep autodoc imports lightweight in CI
 autodoc_mock_imports = [
     "domino._impl",
-    "attrs", "yaml", "pytest",
-    "apache_airflow", "airflow",
-    "pandas", "numpy", "semver",
-    "mlflow", "mlflow_tracing", "mlflow-skinny",
-    "requests", "urllib3", "beautifulsoup4", "bs4",
-    "polling2", "typing_extensions", "frozendict", "python_dateutil", "dateutil",
-    "retry", "docker",
+    "attrs",
+    "yaml",
+    "pytest",
+    "apache_airflow",
+    "airflow",
+    "pandas",
+    "numpy",
+    "semver",
+    "mlflow",
+    "mlflow_tracing",
+    "mlflow-skinny",
+    "requests",
+    "urllib3",
+    "beautifulsoup4",
+    "bs4",
+    "polling2",
+    "typing_extensions",
+    "frozendict",
+    "python_dateutil",
+    "dateutil",
+    "retry",
+    "docker",
 ]
 
 # -- Options for HTML output -------------------------------------------------
-html_static_path = ['_static']
+html_static_path = ["_static"]
diff --git a/domino/_custom_metrics.py b/domino/_custom_metrics.py
index 4e1f3745..69197d7f 100644
--- a/domino/_custom_metrics.py
+++ b/domino/_custom_metrics.py
@@ -1,25 +1,25 @@
+import json
 from abc import ABC, abstractmethod
 from copy import deepcopy
-import json
-from typing import Dict, List, Any, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
-from ._impl.custommetrics.model.metric_value_v1 import MetricValueV1
+from ._impl.custommetrics import schemas
+from ._impl.custommetrics.api_client import SerializedRequestBody
 from ._impl.custommetrics.model.metric_alert_request_v1 import MetricAlertRequestV1
+from ._impl.custommetrics.model.metric_tag_v1 import MetricTagV1
+from ._impl.custommetrics.model.metric_value_v1 import MetricValueV1
+from ._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
+from ._impl.custommetrics.model.new_metric_value_v1 import NewMetricValueV1
+from ._impl.custommetrics.model.new_metric_values_envelope_v1 import (
+    NewMetricValuesEnvelopeV1,
+)
 from ._impl.custommetrics.model.target_range_v1 import TargetRangeV1
 from ._impl.custommetrics.paths.api_metric_alerts_v1.post import (
     request_body_metric_alert_request_v1,
 )
-from ._impl.custommetrics.model.new_metric_values_envelope_v1 import (
-    NewMetricValuesEnvelopeV1,
-)
-from ._impl.custommetrics.model.new_metric_value_v1 import NewMetricValueV1
-from ._impl.custommetrics.model.metric_tag_v1 import MetricTagV1
 from ._impl.custommetrics.paths.api_metric_values_v1.post import (
     request_body_new_metric_values_envelope_v1,
 )
-from ._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
-from ._impl.custommetrics.api_client import SerializedRequestBody
-from ._impl.custommetrics import schemas
 
 
 class _CustomMetricsClientBase(ABC):
diff --git a/domino/_impl/custommetrics/__init__.py b/domino/_impl/custommetrics/__init__.py
index d9593b53..2f90b2fc 100644
--- a/domino/_impl/custommetrics/__init__.py
+++ b/domino/_impl/custommetrics/__init__.py
@@ -3,12 +3,12 @@
 # flake8: noqa
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 __version__ = "1.0.0"
@@ -20,9 +20,11 @@
 from domino._impl.custommetrics.configuration import Configuration
 
 # import exceptions
-from domino._impl.custommetrics.exceptions import OpenApiException
-from domino._impl.custommetrics.exceptions import ApiAttributeError
-from domino._impl.custommetrics.exceptions import ApiTypeError
-from domino._impl.custommetrics.exceptions import ApiValueError
-from domino._impl.custommetrics.exceptions import ApiKeyError
-from domino._impl.custommetrics.exceptions import ApiException
+from domino._impl.custommetrics.exceptions import (
+    ApiAttributeError,
+    ApiException,
+    ApiKeyError,
+    ApiTypeError,
+    ApiValueError,
+    OpenApiException,
+)
diff --git a/domino/_impl/custommetrics/api_client.py b/domino/_impl/custommetrics/api_client.py
index 5bf0309a..cd9ebadc 100644
--- a/domino/_impl/custommetrics/api_client.py
+++ b/domino/_impl/custommetrics/api_client.py
@@ -1,46 +1,46 @@
 # coding: utf-8
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-from decimal import Decimal
-import enum
+import atexit
 import email
+import enum
+import io
 import json
 import os
-import io
-import atexit
-from multiprocessing.pool import ThreadPool
 import re
 import tempfile
 import typing
+from dataclasses import dataclass
+from decimal import Decimal
+from multiprocessing.pool import ThreadPool
+from urllib.parse import quote, urlparse
+
+import frozendict
 import typing_extensions
 import urllib3
 from urllib3._collections import HTTPHeaderDict
-from urllib.parse import urlparse, quote
 from urllib3.fields import RequestField as RequestFieldBase
 
-import frozendict
-
 from domino._impl.custommetrics import rest
 from domino._impl.custommetrics.configuration import Configuration
 from domino._impl.custommetrics.exceptions import ApiTypeError, ApiValueError
 from domino._impl.custommetrics.schemas import (
-    NoneClass,
+    BinarySchema,
     BoolClass,
-    Schema,
     FileIO,
-    BinarySchema,
+    NoneClass,
+    Schema,
+    Unset,
     date,
     datetime,
     none_type,
-    Unset,
     unset,
 )
 
@@ -53,7 +53,7 @@ def __eq__(self, other):
 
 
 class JSONEncoder(json.JSONEncoder):
-    compact_separators = (',', ':')
+    compact_separators = (",", ":")
 
     def default(self, obj):
         if isinstance(obj, str):
@@ -74,24 +74,26 @@ def default(self, obj):
             return {key: self.default(val) for key, val in obj.items()}
         elif isinstance(obj, (list, tuple)):
             return [self.default(item) for item in obj]
-        raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))
+        raise ApiValueError(
+            "Unable to prepare type {} for serialization".format(obj.__class__.__name__)
+        )
 
 
 class ParameterInType(enum.Enum):
-    QUERY = 'query'
-    HEADER = 'header'
-    PATH = 'path'
-    COOKIE = 'cookie'
+    QUERY = "query"
+    HEADER = "header"
+    PATH = "path"
+    COOKIE = "cookie"
 
 
 class ParameterStyle(enum.Enum):
-    MATRIX = 'matrix'
-    LABEL = 'label'
-    FORM = 'form'
-    SIMPLE = 'simple'
-    SPACE_DELIMITED = 'spaceDelimited'
-    PIPE_DELIMITED = 'pipeDelimited'
-    DEEP_OBJECT = 'deepObject'
+    MATRIX = "matrix"
+    LABEL = "label"
+    FORM = "form"
+    SIMPLE = "simple"
+    SPACE_DELIMITED = "spaceDelimited"
+    PIPE_DELIMITED = "pipeDelimited"
+    DEEP_OBJECT = "deepObject"
 
 
 class PrefixSeparatorIterator:
@@ -101,10 +103,10 @@ def __init__(self, prefix: str, separator: str):
         self.prefix = prefix
         self.separator = separator
         self.first = True
-        if separator in {'.', '|', '%20'}:
+        if separator in {".", "|", "%20"}:
             item_separator = separator
         else:
-            item_separator = ','
+            item_separator = ","
         self.item_separator = item_separator
 
     def __iter__(self):
@@ -146,7 +148,9 @@ def __ref6570_item_value(in_data: typing.Any, percent_encode: bool):
         elif isinstance(in_data, dict) and not in_data:
             # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1
             return None
-        raise ApiValueError('Unable to generate a ref6570 item representation of {}'.format(in_data))
+        raise ApiValueError(
+            "Unable to generate a ref6570 item representation of {}".format(in_data)
+        )
 
     @staticmethod
     def _to_dict(name: str, value: str):
@@ -161,13 +165,20 @@ def __ref6570_str_float_int_expansion(
         percent_encode: bool,
         prefix_separator_iterator: PrefixSeparatorIterator,
         var_name_piece: str,
-        named_parameter_expansion: bool
+        named_parameter_expansion: bool,
     ) -> str:
         item_value = cls.__ref6570_item_value(in_data, percent_encode)
-        if item_value is None or (item_value == '' and prefix_separator_iterator.separator == ';'):
+        if item_value is None or (
+            item_value == "" and prefix_separator_iterator.separator == ";"
+        ):
             return next(prefix_separator_iterator) + var_name_piece
-        value_pair_equals = '=' if named_parameter_expansion else ''
-        return next(prefix_separator_iterator) + var_name_piece + value_pair_equals + item_value
+        value_pair_equals = "=" if named_parameter_expansion else ""
+        return (
+            next(prefix_separator_iterator)
+            + var_name_piece
+            + value_pair_equals
+            + item_value
+        )
 
     @classmethod
     def __ref6570_list_expansion(
@@ -178,20 +189,20 @@ def __ref6570_list_expansion(
         percent_encode: bool,
         prefix_separator_iterator: PrefixSeparatorIterator,
         var_name_piece: str,
-        named_parameter_expansion: bool
+        named_parameter_expansion: bool,
     ) -> str:
         item_values = [cls.__ref6570_item_value(v, percent_encode) for v in in_data]
         item_values = [v for v in item_values if v is not None]
         if not item_values:
             # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1
             return ""
-        value_pair_equals = '=' if named_parameter_expansion else ''
+        value_pair_equals = "=" if named_parameter_expansion else ""
         if not explode:
             return (
-                next(prefix_separator_iterator) +
-                var_name_piece +
-                value_pair_equals +
-                prefix_separator_iterator.item_separator.join(item_values)
+                next(prefix_separator_iterator)
+                + var_name_piece
+                + value_pair_equals
+                + prefix_separator_iterator.item_separator.join(item_values)
             )
         # exploded
         return next(prefix_separator_iterator) + next(prefix_separator_iterator).join(
@@ -207,27 +218,32 @@ def __ref6570_dict_expansion(
         percent_encode: bool,
         prefix_separator_iterator: PrefixSeparatorIterator,
         var_name_piece: str,
-        named_parameter_expansion: bool
+        named_parameter_expansion: bool,
     ) -> str:
-        in_data_transformed = {key: cls.__ref6570_item_value(val, percent_encode) for key, val in in_data.items()}
-        in_data_transformed = {key: val for key, val in in_data_transformed.items() if val is not None}
+        in_data_transformed = {
+            key: cls.__ref6570_item_value(val, percent_encode)
+            for key, val in in_data.items()
+        }
+        in_data_transformed = {
+            key: val for key, val in in_data_transformed.items() if val is not None
+        }
         if not in_data_transformed:
             # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1
             return ""
-        value_pair_equals = '=' if named_parameter_expansion else ''
+        value_pair_equals = "=" if named_parameter_expansion else ""
         if not explode:
             return (
-                next(prefix_separator_iterator) +
-                var_name_piece + value_pair_equals +
-                prefix_separator_iterator.item_separator.join(
-                    prefix_separator_iterator.item_separator.join(
-                        item_pair
-                    ) for item_pair in in_data_transformed.items()
+                next(prefix_separator_iterator)
+                + var_name_piece
+                + value_pair_equals
+                + prefix_separator_iterator.item_separator.join(
+                    prefix_separator_iterator.item_separator.join(item_pair)
+                    for item_pair in in_data_transformed.items()
                 )
             )
         # exploded
         return next(prefix_separator_iterator) + next(prefix_separator_iterator).join(
-            [key + '=' + val for key, val in in_data_transformed.items()]
+            [key + "=" + val for key, val in in_data_transformed.items()]
         )
 
     @classmethod
@@ -237,13 +253,13 @@ def _ref6570_expansion(
         in_data: typing.Any,
         explode: bool,
         percent_encode: bool,
-        prefix_separator_iterator: PrefixSeparatorIterator
+        prefix_separator_iterator: PrefixSeparatorIterator,
     ) -> str:
         """
         Separator is for separate variables like dict with explode true, not for array item separation
         """
-        named_parameter_expansion = prefix_separator_iterator.separator in {'&', ';'}
-        var_name_piece = variable_name if named_parameter_expansion else ''
+        named_parameter_expansion = prefix_separator_iterator.separator in {"&", ";"}
+        var_name_piece = variable_name if named_parameter_expansion else ""
         if type(in_data) in {str, float, int}:
             return cls.__ref6570_str_float_int_expansion(
                 variable_name,
@@ -252,7 +268,7 @@ def _ref6570_expansion(
                 percent_encode,
                 prefix_separator_iterator,
                 var_name_piece,
-                named_parameter_expansion
+                named_parameter_expansion,
             )
         elif isinstance(in_data, none_type):
             # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1
@@ -265,7 +281,7 @@ def _ref6570_expansion(
                 percent_encode,
                 prefix_separator_iterator,
                 var_name_piece,
-                named_parameter_expansion
+                named_parameter_expansion,
             )
         elif isinstance(in_data, dict):
             return cls.__ref6570_dict_expansion(
@@ -275,10 +291,12 @@ def _ref6570_expansion(
                 percent_encode,
                 prefix_separator_iterator,
                 var_name_piece,
-                named_parameter_expansion
+                named_parameter_expansion,
             )
         # bool, bytes, etc
-        raise ApiValueError('Unable to generate a ref6570 representation of {}'.format(in_data))
+        raise ApiValueError(
+            "Unable to generate a ref6570 representation of {}".format(in_data)
+        )
 
 
 class StyleFormSerializer(ParameterSerializerBase):
@@ -294,16 +312,16 @@ def _serialize_form(
         name: str,
         explode: bool,
         percent_encode: bool,
-        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None
+        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None,
     ) -> str:
         if prefix_separator_iterator is None:
-            prefix_separator_iterator = PrefixSeparatorIterator('?', '&')
+            prefix_separator_iterator = PrefixSeparatorIterator("?", "&")
         return self._ref6570_expansion(
             variable_name=name,
             in_data=in_data,
             explode=explode,
             percent_encode=percent_encode,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
 
 
@@ -314,15 +332,15 @@ def _serialize_simple(
         in_data: typing.Union[None, int, float, str, bool, dict, list],
         name: str,
         explode: bool,
-        percent_encode: bool
+        percent_encode: bool,
     ) -> str:
-        prefix_separator_iterator = PrefixSeparatorIterator('', ',')
+        prefix_separator_iterator = PrefixSeparatorIterator("", ",")
         return self._ref6570_expansion(
             variable_name=name,
             in_data=in_data,
             explode=explode,
             percent_encode=percent_encode,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
 
 
@@ -334,6 +352,7 @@ class JSONDetector:
     application/json-patch+json
     application/geo+json
     """
+
     __json_content_type_pattern = re.compile("application/[^+]*[+]?(json);?.*")
 
     @classmethod
@@ -369,17 +388,19 @@ class ParameterBase(JSONDetector):
         ParameterInType.HEADER: ParameterStyle.SIMPLE,
         ParameterInType.COOKIE: ParameterStyle.FORM,
     }
-    __disallowed_header_names = {'Accept', 'Content-Type', 'Authorization'}
+    __disallowed_header_names = {"Accept", "Content-Type", "Authorization"}
     _json_encoder = JSONEncoder()
 
     @classmethod
-    def __verify_style_to_in_type(cls, style: typing.Optional[ParameterStyle], in_type: ParameterInType):
+    def __verify_style_to_in_type(
+        cls, style: typing.Optional[ParameterStyle], in_type: ParameterInType
+    ):
         if style is None:
             return
         in_type_set = cls.__style_to_in_type[style]
         if in_type not in in_type_set:
             raise ValueError(
-                'Invalid style and in_type combination. For style={} only in_type={} are allowed'.format(
+                "Invalid style and in_type combination. For style={} only in_type={} are allowed".format(
                     style, in_type_set
                 )
             )
@@ -393,19 +414,29 @@ def __init__(
         explode: bool = False,
         allow_reserved: typing.Optional[bool] = None,
         schema: typing.Optional[typing.Type[Schema]] = None,
-        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
+        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None,
     ):
         if schema is None and content is None:
-            raise ValueError('Value missing; Pass in either schema or content')
+            raise ValueError("Value missing; Pass in either schema or content")
         if schema and content:
-            raise ValueError('Too many values provided. Both schema and content were provided. Only one may be input')
+            raise ValueError(
+                "Too many values provided. Both schema and content were provided. Only one may be input"
+            )
         if name in self.__disallowed_header_names and in_type is ParameterInType.HEADER:
-            raise ValueError('Invalid name, name may not be one of {}'.format(self.__disallowed_header_names))
+            raise ValueError(
+                "Invalid name, name may not be one of {}".format(
+                    self.__disallowed_header_names
+                )
+            )
         self.__verify_style_to_in_type(style, in_type)
         if content is None and style is None:
             style = self.__in_type_to_default_style[in_type]
-        if content is not None and in_type in self.__in_type_to_default_style and len(content) != 1:
-            raise ValueError('Invalid content length, content length must equal 1')
+        if (
+            content is not None
+            and in_type in self.__in_type_to_default_style
+            and len(content) != 1
+        ):
+            raise ValueError("Invalid content length, content length must equal 1")
         self.in_type = in_type
         self.name = name
         self.required = required
@@ -418,7 +449,7 @@ def __init__(
     def _serialize_json(
         self,
         in_data: typing.Union[None, int, float, str, bool, dict, list],
-        eliminate_whitespace: bool = False
+        eliminate_whitespace: bool = False,
     ) -> str:
         if eliminate_whitespace:
             return json.dumps(in_data, separators=self._json_encoder.compact_separators)
@@ -435,7 +466,7 @@ def __init__(
         explode: bool = False,
         allow_reserved: typing.Optional[bool] = None,
         schema: typing.Optional[typing.Type[Schema]] = None,
-        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
+        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None,
     ):
         super().__init__(
             name,
@@ -445,34 +476,32 @@ def __init__(
             explode=explode,
             allow_reserved=allow_reserved,
             schema=schema,
-            content=content
+            content=content,
         )
 
     def __serialize_label(
-        self,
-        in_data: typing.Union[None, int, float, str, bool, dict, list]
+        self, in_data: typing.Union[None, int, float, str, bool, dict, list]
     ) -> typing.Dict[str, str]:
-        prefix_separator_iterator = PrefixSeparatorIterator('.', '.')
+        prefix_separator_iterator = PrefixSeparatorIterator(".", ".")
         value = self._ref6570_expansion(
             variable_name=self.name,
             in_data=in_data,
             explode=self.explode,
             percent_encode=True,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
         return self._to_dict(self.name, value)
 
     def __serialize_matrix(
-        self,
-        in_data: typing.Union[None, int, float, str, bool, dict, list]
+        self, in_data: typing.Union[None, int, float, str, bool, dict, list]
     ) -> typing.Dict[str, str]:
-        prefix_separator_iterator = PrefixSeparatorIterator(';', ';')
+        prefix_separator_iterator = PrefixSeparatorIterator(";", ";")
         value = self._ref6570_expansion(
             variable_name=self.name,
             in_data=in_data,
             explode=self.explode,
             percent_encode=True,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
         return self._to_dict(self.name, value)
 
@@ -481,17 +510,27 @@ def __serialize_simple(
         in_data: typing.Union[None, int, float, str, bool, dict, list],
     ) -> typing.Dict[str, str]:
         value = self._serialize_simple(
-            in_data=in_data,
-            name=self.name,
-            explode=self.explode,
-            percent_encode=True
+            in_data=in_data, name=self.name, explode=self.explode, percent_encode=True
         )
         return self._to_dict(self.name, value)
 
     def serialize(
         self,
         in_data: typing.Union[
-            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict]
+            Schema,
+            Decimal,
+            int,
+            float,
+            str,
+            date,
+            datetime,
+            None,
+            bool,
+            list,
+            tuple,
+            dict,
+            frozendict.frozendict,
+        ],
     ) -> typing.Dict[str, str]:
         if self.schema:
             cast_in_data = self.schema(in_data)
@@ -519,7 +558,9 @@ def serialize(
             if self._content_type_is_json(content_type):
                 value = self._serialize_json(cast_in_data)
                 return self._to_dict(self.name, value)
-            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
+            raise NotImplementedError(
+                "Serialization of {} has not yet been implemented".format(content_type)
+            )
 
 
 class QueryParameter(ParameterBase, StyleFormSerializer):
@@ -532,10 +573,12 @@ def __init__(
         explode: typing.Optional[bool] = None,
         allow_reserved: typing.Optional[bool] = None,
         schema: typing.Optional[typing.Type[Schema]] = None,
-        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
+        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None,
     ):
         used_style = ParameterStyle.FORM if style is None else style
-        used_explode = self._get_default_explode(used_style) if explode is None else explode
+        used_explode = (
+            self._get_default_explode(used_style) if explode is None else explode
+        )
 
         super().__init__(
             name,
@@ -545,13 +588,13 @@ def __init__(
             explode=used_explode,
             allow_reserved=allow_reserved,
             schema=schema,
-            content=content
+            content=content,
         )
 
     def __serialize_space_delimited(
         self,
         in_data: typing.Union[None, int, float, str, bool, dict, list],
-        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator]
+        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator],
     ) -> typing.Dict[str, str]:
         if prefix_separator_iterator is None:
             prefix_separator_iterator = self.get_prefix_separator_iterator()
@@ -560,14 +603,14 @@ def __serialize_space_delimited(
             in_data=in_data,
             explode=self.explode,
             percent_encode=True,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
         return self._to_dict(self.name, value)
 
     def __serialize_pipe_delimited(
         self,
         in_data: typing.Union[None, int, float, str, bool, dict, list],
-        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator]
+        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator],
     ) -> typing.Dict[str, str]:
         if prefix_separator_iterator is None:
             prefix_separator_iterator = self.get_prefix_separator_iterator()
@@ -576,14 +619,14 @@ def __serialize_pipe_delimited(
             in_data=in_data,
             explode=self.explode,
             percent_encode=True,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
         return self._to_dict(self.name, value)
 
     def __serialize_form(
         self,
         in_data: typing.Union[None, int, float, str, bool, dict, list],
-        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator]
+        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator],
     ) -> typing.Dict[str, str]:
         if prefix_separator_iterator is None:
             prefix_separator_iterator = self.get_prefix_separator_iterator()
@@ -592,23 +635,36 @@ def __serialize_form(
             name=self.name,
             explode=self.explode,
             percent_encode=True,
-            prefix_separator_iterator=prefix_separator_iterator
+            prefix_separator_iterator=prefix_separator_iterator,
         )
         return self._to_dict(self.name, value)
 
     def get_prefix_separator_iterator(self) -> typing.Optional[PrefixSeparatorIterator]:
         if self.style is ParameterStyle.FORM:
-            return PrefixSeparatorIterator('?', '&')
+            return PrefixSeparatorIterator("?", "&")
         elif self.style is ParameterStyle.SPACE_DELIMITED:
-            return PrefixSeparatorIterator('', '%20')
+            return PrefixSeparatorIterator("", "%20")
         elif self.style is ParameterStyle.PIPE_DELIMITED:
-            return PrefixSeparatorIterator('', '|')
+            return PrefixSeparatorIterator("", "|")
 
     def serialize(
         self,
         in_data: typing.Union[
-            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict],
-        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None
+            Schema,
+            Decimal,
+            int,
+            float,
+            str,
+            date,
+            datetime,
+            None,
+            bool,
+            list,
+            tuple,
+            dict,
+            frozendict.frozendict,
+        ],
+        prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None,
     ) -> typing.Dict[str, str]:
         if self.schema:
             cast_in_data = self.schema(in_data)
@@ -629,11 +685,17 @@ def serialize(
             if self.style:
                 # TODO update query ones to omit setting values when [] {} or None is input
                 if self.style is ParameterStyle.FORM:
-                    return self.__serialize_form(cast_in_data, prefix_separator_iterator)
+                    return self.__serialize_form(
+                        cast_in_data, prefix_separator_iterator
+                    )
                 elif self.style is ParameterStyle.SPACE_DELIMITED:
-                    return self.__serialize_space_delimited(cast_in_data, prefix_separator_iterator)
+                    return self.__serialize_space_delimited(
+                        cast_in_data, prefix_separator_iterator
+                    )
                 elif self.style is ParameterStyle.PIPE_DELIMITED:
-                    return self.__serialize_pipe_delimited(cast_in_data, prefix_separator_iterator)
+                    return self.__serialize_pipe_delimited(
+                        cast_in_data, prefix_separator_iterator
+                    )
         # self.content will be length one
         if prefix_separator_iterator is None:
             prefix_separator_iterator = self.get_prefix_separator_iterator()
@@ -644,9 +706,11 @@ def serialize(
                 value = self._serialize_json(cast_in_data, eliminate_whitespace=True)
                 return self._to_dict(
                     self.name,
-                    next(prefix_separator_iterator) + self.name + '=' + quote(value)
+                    next(prefix_separator_iterator) + self.name + "=" + quote(value),
                 )
-            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
+            raise NotImplementedError(
+                "Serialization of {} has not yet been implemented".format(content_type)
+            )
 
 
 class CookieParameter(ParameterBase, StyleFormSerializer):
@@ -659,10 +723,16 @@ def __init__(
         explode: typing.Optional[bool] = None,
         allow_reserved: typing.Optional[bool] = None,
         schema: typing.Optional[typing.Type[Schema]] = None,
-        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
+        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None,
     ):
-        used_style = ParameterStyle.FORM if style is None and content is None and schema else style
-        used_explode = self._get_default_explode(used_style) if explode is None else explode
+        used_style = (
+            ParameterStyle.FORM
+            if style is None and content is None and schema
+            else style
+        )
+        used_explode = (
+            self._get_default_explode(used_style) if explode is None else explode
+        )
 
         super().__init__(
             name,
@@ -672,13 +742,26 @@ def __init__(
             explode=used_explode,
             allow_reserved=allow_reserved,
             schema=schema,
-            content=content
+            content=content,
         )
 
     def serialize(
         self,
         in_data: typing.Union[
-            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict]
+            Schema,
+            Decimal,
+            int,
+            float,
+            str,
+            date,
+            datetime,
+            None,
+            bool,
+            list,
+            tuple,
+            dict,
+            frozendict.frozendict,
+        ],
     ) -> typing.Dict[str, str]:
         if self.schema:
             cast_in_data = self.schema(in_data)
@@ -697,7 +780,7 @@ def serialize(
                     explode=self.explode,
                     name=self.name,
                     percent_encode=False,
-                    prefix_separator_iterator=PrefixSeparatorIterator('', '&')
+                    prefix_separator_iterator=PrefixSeparatorIterator("", "&"),
                 )
                 return self._to_dict(self.name, value)
         # self.content will be length one
@@ -707,7 +790,9 @@ def serialize(
             if self._content_type_is_json(content_type):
                 value = self._serialize_json(cast_in_data)
                 return self._to_dict(self.name, value)
-            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
+            raise NotImplementedError(
+                "Serialization of {} has not yet been implemented".format(content_type)
+            )
 
 
 class HeaderParameter(ParameterBase, StyleSimpleSerializer):
@@ -719,7 +804,7 @@ def __init__(
         explode: bool = False,
         allow_reserved: typing.Optional[bool] = None,
         schema: typing.Optional[typing.Type[Schema]] = None,
-        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None
+        content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None,
     ):
         super().__init__(
             name,
@@ -729,11 +814,13 @@ def __init__(
             explode=explode,
             allow_reserved=allow_reserved,
             schema=schema,
-            content=content
+            content=content,
         )
 
     @staticmethod
-    def __to_headers(in_data: typing.Tuple[typing.Tuple[str, str], ...]) -> HTTPHeaderDict:
+    def __to_headers(
+        in_data: typing.Tuple[typing.Tuple[str, str], ...],
+    ) -> HTTPHeaderDict:
         data = tuple(t for t in in_data if t)
         headers = HTTPHeaderDict()
         if not data:
@@ -744,7 +831,20 @@ def __to_headers(in_data: typing.Tuple[typing.Tuple[str, str], ...]) -> HTTPHead
     def serialize(
         self,
         in_data: typing.Union[
-            Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict]
+            Schema,
+            Decimal,
+            int,
+            float,
+            str,
+            date,
+            datetime,
+            None,
+            bool,
+            list,
+            tuple,
+            dict,
+            frozendict.frozendict,
+        ],
     ) -> HTTPHeaderDict:
         if self.schema:
             cast_in_data = self.schema(in_data)
@@ -755,7 +855,9 @@ def serialize(
                     returns headers: dict
             """
             if self.style:
-                value = self._serialize_simple(cast_in_data, self.name, self.explode, False)
+                value = self._serialize_simple(
+                    cast_in_data, self.name, self.explode, False
+                )
                 return self.__to_headers(((self.name, value),))
         # self.content will be length one
         for content_type, schema in self.content.items():
@@ -764,7 +866,9 @@ def serialize(
             if self._content_type_is_json(content_type):
                 value = self._serialize_json(cast_in_data)
                 return self.__to_headers(((self.name, value),))
-            raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type))
+            raise NotImplementedError(
+                "Serialization of {} has not yet been implemented".format(content_type)
+            )
 
 
 class Encoding:
@@ -793,6 +897,7 @@ class MediaType:
         The encoding object SHALL only apply to requestBody objects when the media type is
         multipart or application/x-www-form-urlencoded.
     """
+
     schema: typing.Optional[typing.Type[Schema]] = None
     encoding: typing.Optional[typing.Dict[str, Encoding]] = None
 
@@ -807,7 +912,7 @@ def __init__(
         self,
         response: urllib3.HTTPResponse,
         body: typing.Union[Unset, typing.Type[Schema]],
-        headers: typing.Union[Unset, typing.List[HeaderParameter]]
+        headers: typing.Union[Unset, typing.List[HeaderParameter]],
     ):
         """
         pycharm needs this to prevent 'Unexpected argument' warnings
@@ -835,7 +940,9 @@ def __init__(
     ):
         self.headers = headers
         if content is not None and len(content) == 0:
-            raise ValueError('Invalid value for content, the content dict must have >= 1 entry')
+            raise ValueError(
+                "Invalid value for content, the content dict must have >= 1 entry"
+            )
         self.content = content
         self.response_cls = response_cls
 
@@ -845,7 +952,9 @@ def __deserialize_json(response: urllib3.HTTPResponse) -> typing.Any:
         return json.loads(response.data)
 
     @staticmethod
-    def __file_name_from_response_url(response_url: typing.Optional[str]) -> typing.Optional[str]:
+    def __file_name_from_response_url(
+        response_url: typing.Optional[str],
+    ) -> typing.Optional[str]:
         if response_url is None:
             return None
         url_path = urlparse(response_url).path
@@ -858,7 +967,9 @@ def __file_name_from_response_url(response_url: typing.Optional[str]) -> typing.
         return None
 
     @classmethod
-    def __file_name_from_content_disposition(cls, content_disposition: typing.Optional[str]) -> typing.Optional[str]:
+    def __file_name_from_content_disposition(
+        cls, content_disposition: typing.Optional[str]
+    ) -> typing.Optional[str]:
         if content_disposition is None:
             return None
         match = cls.__filename_content_disposition_pattern.search(content_disposition)
@@ -876,17 +987,16 @@ def __deserialize_application_octet_stream(
             a file will be written and returned
         """
         if response.supports_chunked_reads():
-            file_name = (
-                self.__file_name_from_content_disposition(response.headers.get('content-disposition'))
-                or self.__file_name_from_response_url(response.geturl())
-            )
+            file_name = self.__file_name_from_content_disposition(
+                response.headers.get("content-disposition")
+            ) or self.__file_name_from_response_url(response.geturl())
 
             if file_name is None:
                 _fd, path = tempfile.mkstemp()
             else:
                 path = os.path.join(tempfile.gettempdir(), file_name)
 
-            with open(path, 'wb') as new_file:
+            with open(path, "wb") as new_file:
                 chunk_size = 1024
                 while True:
                     data = response.read(chunk_size)
@@ -895,27 +1005,29 @@ def __deserialize_application_octet_stream(
                     new_file.write(data)
             # release_conn is needed for streaming connections only
             response.release_conn()
-            new_file = open(path, 'rb')
+            new_file = open(path, "rb")
             return new_file
         else:
             return response.data
 
     @staticmethod
     def __deserialize_multipart_form_data(
-        response: urllib3.HTTPResponse
+        response: urllib3.HTTPResponse,
     ) -> typing.Dict[str, typing.Any]:
         msg = email.message_from_bytes(response.data)
         return {
-            part.get_param("name", header="Content-Disposition"): part.get_payload(
-                decode=True
-            ).decode(part.get_content_charset())
-            if part.get_content_charset()
-            else part.get_payload()
+            part.get_param("name", header="Content-Disposition"): (
+                part.get_payload(decode=True).decode(part.get_content_charset())
+                if part.get_content_charset()
+                else part.get_payload()
+            )
             for part in msg.get_payload()
         }
 
-    def deserialize(self, response: urllib3.HTTPResponse, configuration: Configuration) -> ApiResponse:
-        content_type = response.getheader('content-type')
+    def deserialize(
+        self, response: urllib3.HTTPResponse, configuration: Configuration
+    ) -> ApiResponse:
+        content_type = response.getheader("content-type")
         deserialized_body = unset
         streamed = response.supports_chunked_reads()
 
@@ -934,29 +1046,30 @@ def deserialize(self, response: urllib3.HTTPResponse, configuration: Configurati
             if body_schema is None:
                 # some specs do not define response content media type schemas
                 return self.response_cls(
-                    response=response,
-                    headers=deserialized_headers,
-                    body=unset
+                    response=response, headers=deserialized_headers, body=unset
                 )
 
             if self._content_type_is_json(content_type):
                 body_data = self.__deserialize_json(response)
-            elif content_type == 'application/octet-stream':
+            elif content_type == "application/octet-stream":
                 body_data = self.__deserialize_application_octet_stream(response)
-            elif content_type.startswith('multipart/form-data'):
+            elif content_type.startswith("multipart/form-data"):
                 body_data = self.__deserialize_multipart_form_data(response)
-                content_type = 'multipart/form-data'
+                content_type = "multipart/form-data"
             else:
-                raise NotImplementedError('Deserialization of {} has not yet been implemented'.format(content_type))
+                raise NotImplementedError(
+                    "Deserialization of {} has not yet been implemented".format(
+                        content_type
+                    )
+                )
             deserialized_body = body_schema.from_openapi_data_oapg(
-                body_data, _configuration=configuration)
+                body_data, _configuration=configuration
+            )
         elif streamed:
             response.release_conn()
 
         return self.response_cls(
-            response=response,
-            headers=deserialized_headers,
-            body=deserialized_body
+            response=response, headers=deserialized_headers, body=deserialized_body
         )
 
 
@@ -990,7 +1103,7 @@ def __init__(
         header_name: typing.Optional[str] = None,
         header_value: typing.Optional[str] = None,
         cookie: typing.Optional[str] = None,
-        pool_threads: int = 1
+        pool_threads: int = 1,
     ):
         if configuration is None:
             configuration = Configuration()
@@ -1003,7 +1116,7 @@ def __init__(
             self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/1.0.0/python'
+        self.user_agent = "OpenAPI-Generator/1.0.0/python"
 
     def __enter__(self):
         return self
@@ -1016,13 +1129,13 @@ def close(self):
             self._pool.close()
             self._pool.join()
             self._pool = None
-            if hasattr(atexit, 'unregister'):
+            if hasattr(atexit, "unregister"):
                 atexit.unregister(self.close)
 
     @property
     def pool(self):
         """Create thread pool on first request
-         avoids instantiating unused threadpool for blocking clients.
+        avoids instantiating unused threadpool for blocking clients.
         """
         if self._pool is None:
             atexit.register(self.close)
@@ -1032,11 +1145,11 @@ def pool(self):
     @property
     def user_agent(self):
         """User agent for this API client"""
-        return self.default_headers['User-Agent']
+        return self.default_headers["User-Agent"]
 
     @user_agent.setter
     def user_agent(self, value):
-        self.default_headers['User-Agent'] = value
+        self.default_headers["User-Agent"] = value
 
     def set_default_header(self, header_name, header_value):
         self.default_headers[header_name] = header_value
@@ -1057,11 +1170,12 @@ def __call_api(
         # header parameters
         used_headers = HTTPHeaderDict(self.default_headers)
         if self.cookie:
-            headers['Cookie'] = self.cookie
+            headers["Cookie"] = self.cookie
 
         # auth setting
-        self.update_params_for_auth(used_headers,
-                                    auth_settings, resource_path, method, body)
+        self.update_params_for_auth(
+            used_headers, auth_settings, resource_path, method, body
+        )
 
         # must happen after cookie setting and auth setting in case user is overriding those
         if headers:
@@ -1159,7 +1273,7 @@ def call_api(
                 stream,
                 timeout,
                 host,
-            )
+            ),
         )
 
     def request(
@@ -1174,57 +1288,62 @@ def request(
     ) -> urllib3.HTTPResponse:
         """Makes the HTTP request using RESTClient."""
         if method == "GET":
-            return self.rest_client.GET(url,
-                                        stream=stream,
-                                        timeout=timeout,
-                                        headers=headers)
+            return self.rest_client.GET(
+                url, stream=stream, timeout=timeout, headers=headers
+            )
         elif method == "HEAD":
-            return self.rest_client.HEAD(url,
-                                         stream=stream,
-                                         timeout=timeout,
-                                         headers=headers)
+            return self.rest_client.HEAD(
+                url, stream=stream, timeout=timeout, headers=headers
+            )
         elif method == "OPTIONS":
-            return self.rest_client.OPTIONS(url,
-                                            headers=headers,
-                                            fields=fields,
-                                            stream=stream,
-                                            timeout=timeout,
-                                            body=body)
+            return self.rest_client.OPTIONS(
+                url,
+                headers=headers,
+                fields=fields,
+                stream=stream,
+                timeout=timeout,
+                body=body,
+            )
         elif method == "POST":
-            return self.rest_client.POST(url,
-                                         headers=headers,
-                                         fields=fields,
-                                         stream=stream,
-                                         timeout=timeout,
-                                         body=body)
+            return self.rest_client.POST(
+                url,
+                headers=headers,
+                fields=fields,
+                stream=stream,
+                timeout=timeout,
+                body=body,
+            )
         elif method == "PUT":
-            return self.rest_client.PUT(url,
-                                        headers=headers,
-                                        fields=fields,
-                                        stream=stream,
-                                        timeout=timeout,
-                                        body=body)
+            return self.rest_client.PUT(
+                url,
+                headers=headers,
+                fields=fields,
+                stream=stream,
+                timeout=timeout,
+                body=body,
+            )
         elif method == "PATCH":
-            return self.rest_client.PATCH(url,
-                                          headers=headers,
-                                          fields=fields,
-                                          stream=stream,
-                                          timeout=timeout,
-                                          body=body)
+            return self.rest_client.PATCH(
+                url,
+                headers=headers,
+                fields=fields,
+                stream=stream,
+                timeout=timeout,
+                body=body,
+            )
         elif method == "DELETE":
-            return self.rest_client.DELETE(url,
-                                           headers=headers,
-                                           stream=stream,
-                                           timeout=timeout,
-                                           body=body)
+            return self.rest_client.DELETE(
+                url, headers=headers, stream=stream, timeout=timeout, body=body
+            )
         else:
             raise ApiValueError(
                 "http method must be `GET`, `HEAD`, `OPTIONS`,"
                 " `POST`, `PATCH`, `PUT` or `DELETE`."
             )
 
-    def update_params_for_auth(self, headers, auth_settings,
-                               resource_path, method, body):
+    def update_params_for_auth(
+        self, headers, auth_settings, resource_path, method, body
+    ):
         """Updates header and query params based on authentication setting.
 
         :param headers: Header parameters dict to be updated.
@@ -1241,20 +1360,20 @@ def update_params_for_auth(self, headers, auth_settings,
             auth_setting = self.configuration.auth_settings().get(auth)
             if not auth_setting:
                 continue
-            if auth_setting['in'] == 'cookie':
-                headers.add('Cookie', auth_setting['value'])
-            elif auth_setting['in'] == 'header':
-                if auth_setting['type'] != 'http-signature':
-                    headers.add(auth_setting['key'], auth_setting['value'])
-            elif auth_setting['in'] == 'query':
-                """ TODO implement auth in query
+            if auth_setting["in"] == "cookie":
+                headers.add("Cookie", auth_setting["value"])
+            elif auth_setting["in"] == "header":
+                if auth_setting["type"] != "http-signature":
+                    headers.add(auth_setting["key"], auth_setting["value"])
+            elif auth_setting["in"] == "query":
+                """TODO implement auth in query
                 need to pass in prefix_separator_iterator
                 and need to output resource_path with query params added
                 """
                 raise ApiValueError("Auth in query not yet implemented")
             else:
                 raise ApiValueError(
-                    'Authentication token must be in `query` or `header`'
+                    "Authentication token must be in `query` or `header`"
                 )
 
 
@@ -1271,7 +1390,10 @@ def __init__(self, api_client: typing.Optional[ApiClient] = None):
         self.api_client = api_client
 
     @staticmethod
-    def _verify_typed_dict_inputs_oapg(cls: typing.Type[typing_extensions.TypedDict], data: typing.Dict[str, typing.Any]):
+    def _verify_typed_dict_inputs_oapg(
+        cls: typing.Type[typing_extensions.TypedDict],
+        data: typing.Dict[str, typing.Any],
+    ):
         """
         Ensures that:
         - required keys are present
@@ -1290,14 +1412,16 @@ def _verify_typed_dict_inputs_oapg(cls: typing.Type[typing_extensions.TypedDict]
                 required_keys_with_unset_values.append(required_key)
         if missing_required_keys:
             raise ApiTypeError(
-                '{} missing {} required arguments: {}'.format(
+                "{} missing {} required arguments: {}".format(
                     cls.__name__, len(missing_required_keys), missing_required_keys
-                 )
-             )
+                )
+            )
         if required_keys_with_unset_values:
             raise ApiValueError(
-                '{} contains invalid unset values for {} required keys: {}'.format(
-                    cls.__name__, len(required_keys_with_unset_values), required_keys_with_unset_values
+                "{} contains invalid unset values for {} required keys: {}".format(
+                    cls.__name__,
+                    len(required_keys_with_unset_values),
+                    required_keys_with_unset_values,
                 )
             )
 
@@ -1308,8 +1432,10 @@ def _verify_typed_dict_inputs_oapg(cls: typing.Type[typing_extensions.TypedDict]
             disallowed_additional_keys.append(key)
         if disallowed_additional_keys:
             raise ApiTypeError(
-                '{} got {} unexpected keyword arguments: {}'.format(
-                    cls.__name__, len(disallowed_additional_keys), disallowed_additional_keys
+                "{} got {} unexpected keyword arguments: {}".format(
+                    cls.__name__,
+                    len(disallowed_additional_keys),
+                    disallowed_additional_keys,
                 )
             )
 
@@ -1317,7 +1443,7 @@ def _get_host_oapg(
         self,
         operation_id: str,
         servers: typing.Tuple[typing.Dict[str, str], ...] = tuple(),
-        host_index: typing.Optional[int] = None
+        host_index: typing.Optional[int] = None,
     ) -> typing.Optional[str]:
         configuration = self.api_client.configuration
         try:
@@ -1336,8 +1462,7 @@ def _get_host_oapg(
         except IndexError:
             if servers:
                 raise ApiValueError(
-                    "Invalid host index. Must be 0 <= index < %s" %
-                    len(servers)
+                    "Invalid host index. Must be 0 <= index < %s" % len(servers)
                 )
             host = None
         return host
@@ -1353,6 +1478,7 @@ class RequestBody(StyleFormSerializer, JSONDetector):
     A request body parameter
     content: content_type to MediaType Schema info
     """
+
     __json_encoder = JSONEncoder()
 
     def __init__(
@@ -1362,46 +1488,57 @@ def __init__(
     ):
         self.required = required
         if len(content) == 0:
-            raise ValueError('Invalid value for content, the content dict must have >= 1 entry')
+            raise ValueError(
+                "Invalid value for content, the content dict must have >= 1 entry"
+            )
         self.content = content
 
-    def __serialize_json(
-        self,
-        in_data: typing.Any
-    ) -> typing.Dict[str, bytes]:
+    def __serialize_json(self, in_data: typing.Any) -> typing.Dict[str, bytes]:
         in_data = self.__json_encoder.default(in_data)
-        json_str = json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode(
-            "utf-8"
-        )
+        json_str = json.dumps(
+            in_data, separators=(",", ":"), ensure_ascii=False
+        ).encode("utf-8")
         return dict(body=json_str)
 
     @staticmethod
     def __serialize_text_plain(in_data: typing.Any) -> typing.Dict[str, str]:
         if isinstance(in_data, frozendict.frozendict):
-            raise ValueError('Unable to serialize type frozendict.frozendict to text/plain')
+            raise ValueError(
+                "Unable to serialize type frozendict.frozendict to text/plain"
+            )
         elif isinstance(in_data, tuple):
-            raise ValueError('Unable to serialize type tuple to text/plain')
+            raise ValueError("Unable to serialize type tuple to text/plain")
         elif isinstance(in_data, NoneClass):
-            raise ValueError('Unable to serialize type NoneClass to text/plain')
+            raise ValueError("Unable to serialize type NoneClass to text/plain")
         elif isinstance(in_data, BoolClass):
-            raise ValueError('Unable to serialize type BoolClass to text/plain')
+            raise ValueError("Unable to serialize type BoolClass to text/plain")
         return dict(body=str(in_data))
 
     def __multipart_json_item(self, key: str, value: Schema) -> RequestField:
         json_value = self.__json_encoder.default(value)
-        return RequestField(name=key, data=json.dumps(json_value), headers={'Content-Type': 'application/json'})
+        return RequestField(
+            name=key,
+            data=json.dumps(json_value),
+            headers={"Content-Type": "application/json"},
+        )
 
     def __multipart_form_item(self, key: str, value: Schema) -> RequestField:
         if isinstance(value, str):
-            return RequestField(name=key, data=str(value), headers={'Content-Type': 'text/plain'})
+            return RequestField(
+                name=key, data=str(value), headers={"Content-Type": "text/plain"}
+            )
         elif isinstance(value, bytes):
-            return RequestField(name=key, data=value, headers={'Content-Type': 'application/octet-stream'})
+            return RequestField(
+                name=key,
+                data=value,
+                headers={"Content-Type": "application/octet-stream"},
+            )
         elif isinstance(value, FileIO):
             request_field = RequestField(
                 name=key,
                 data=value.read(),
                 filename=os.path.basename(value.name),
-                headers={'Content-Type': 'application/octet-stream'}
+                headers={"Content-Type": "application/octet-stream"},
             )
             value.close()
             return request_field
@@ -1412,7 +1549,9 @@ def __serialize_multipart_form_data(
         self, in_data: Schema
     ) -> typing.Dict[str, typing.Tuple[RequestField, ...]]:
         if not isinstance(in_data, frozendict.frozendict):
-            raise ValueError(f'Unable to serialize {in_data} to multipart/form-data because it is not a dict of data')
+            raise ValueError(
+                f"Unable to serialize {in_data} to multipart/form-data because it is not a dict of data"
+            )
         """
         In a multipart/form-data request body, each schema property, or each element of a schema array property,
         takes a section in the payload with an internal header as defined by RFC7578. The serialization strategy
@@ -1444,7 +1583,9 @@ def __serialize_multipart_form_data(
 
         return dict(fields=tuple(fields))
 
-    def __serialize_application_octet_stream(self, in_data: BinarySchema) -> typing.Dict[str, bytes]:
+    def __serialize_application_octet_stream(
+        self, in_data: BinarySchema
+    ) -> typing.Dict[str, bytes]:
         if isinstance(in_data, bytes):
             return dict(body=in_data)
         # FileIO type
@@ -1460,9 +1601,12 @@ def __serialize_application_x_www_form_data(
         """
         if not isinstance(in_data, frozendict.frozendict):
             raise ValueError(
-                f'Unable to serialize {in_data} to application/x-www-form-urlencoded because it is not a dict of data')
+                f"Unable to serialize {in_data} to application/x-www-form-urlencoded because it is not a dict of data"
+            )
         cast_in_data = self.__json_encoder.default(in_data)
-        value = self._serialize_form(cast_in_data, name='', explode=True, percent_encode=False)
+        value = self._serialize_form(
+            cast_in_data, name="", explode=True, percent_encode=False
+        )
         return dict(body=value)
 
     def serialize(
@@ -1488,12 +1632,14 @@ def serialize(
         # and content_type is multipart or application/x-www-form-urlencoded
         if self._content_type_is_json(content_type):
             return self.__serialize_json(cast_in_data)
-        elif content_type == 'text/plain':
+        elif content_type == "text/plain":
             return self.__serialize_text_plain(cast_in_data)
-        elif content_type == 'multipart/form-data':
+        elif content_type == "multipart/form-data":
             return self.__serialize_multipart_form_data(cast_in_data)
-        elif content_type == 'application/x-www-form-urlencoded':
+        elif content_type == "application/x-www-form-urlencoded":
             return self.__serialize_application_x_www_form_data(cast_in_data)
-        elif content_type == 'application/octet-stream':
+        elif content_type == "application/octet-stream":
             return self.__serialize_application_octet_stream(cast_in_data)
-        raise NotImplementedError('Serialization has not yet been implemented for {}'.format(content_type))
+        raise NotImplementedError(
+            "Serialization has not yet been implemented for {}".format(content_type)
+        )
diff --git a/domino/_impl/custommetrics/apis/path_to_api.py b/domino/_impl/custommetrics/apis/path_to_api.py
index 395bc8f6..646e59ae 100644
--- a/domino/_impl/custommetrics/apis/path_to_api.py
+++ b/domino/_impl/custommetrics/apis/path_to_api.py
@@ -1,17 +1,19 @@
 import typing_extensions
 
-from domino._impl.custommetrics.paths import PathValues
 from domino._impl.custommetrics.apis.paths.api_metric_alerts_v1 import ApiMetricAlertsV1
 from domino._impl.custommetrics.apis.paths.api_metric_values_v1 import ApiMetricValuesV1
-from domino._impl.custommetrics.apis.paths.api_metric_values_v1_model_monitoring_id_metric import ApiMetricValuesV1ModelMonitoringIdMetric
+from domino._impl.custommetrics.apis.paths.api_metric_values_v1_model_monitoring_id_metric import (
+    ApiMetricValuesV1ModelMonitoringIdMetric,
+)
+from domino._impl.custommetrics.paths import PathValues
 
 PathToApi = typing_extensions.TypedDict(
-    'PathToApi',
+    "PathToApi",
     {
         PathValues.API_METRIC_ALERTS_V1: ApiMetricAlertsV1,
         PathValues.API_METRIC_VALUES_V1: ApiMetricValuesV1,
         PathValues.API_METRIC_VALUES_V1_MODEL_MONITORING_ID_METRIC: ApiMetricValuesV1ModelMonitoringIdMetric,
-    }
+    },
 )
 
 path_to_api = PathToApi(
diff --git a/domino/_impl/custommetrics/apis/paths/api_metric_values_v1_model_monitoring_id_metric.py b/domino/_impl/custommetrics/apis/paths/api_metric_values_v1_model_monitoring_id_metric.py
index 7837510b..1039b1fc 100644
--- a/domino/_impl/custommetrics/apis/paths/api_metric_values_v1_model_monitoring_id_metric.py
+++ b/domino/_impl/custommetrics/apis/paths/api_metric_values_v1_model_monitoring_id_metric.py
@@ -1,4 +1,6 @@
-from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric.get import ApiForget
+from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric.get import (
+    ApiForget,
+)
 
 
 class ApiMetricValuesV1ModelMonitoringIdMetric(
diff --git a/domino/_impl/custommetrics/apis/tag_to_api.py b/domino/_impl/custommetrics/apis/tag_to_api.py
index 1dcbc3db..f32ad83f 100644
--- a/domino/_impl/custommetrics/apis/tag_to_api.py
+++ b/domino/_impl/custommetrics/apis/tag_to_api.py
@@ -4,10 +4,10 @@
 from domino._impl.custommetrics.apis.tags.custom_metrics_api import CustomMetricsApi
 
 TagToApi = typing_extensions.TypedDict(
-    'TagToApi',
+    "TagToApi",
     {
         TagValues.CUSTOM_METRICS: CustomMetricsApi,
-    }
+    },
 )
 
 tag_to_api = TagToApi(
diff --git a/domino/_impl/custommetrics/apis/tags/custom_metrics_api.py b/domino/_impl/custommetrics/apis/tags/custom_metrics_api.py
index 57603899..001b7390 100644
--- a/domino/_impl/custommetrics/apis/tags/custom_metrics_api.py
+++ b/domino/_impl/custommetrics/apis/tags/custom_metrics_api.py
@@ -1,17 +1,19 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from domino._impl.custommetrics.paths.api_metric_values_v1.post import LogMetricValues
-from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric.get import RetrieveMetricValues
 from domino._impl.custommetrics.paths.api_metric_alerts_v1.post import SendMetricAlert
+from domino._impl.custommetrics.paths.api_metric_values_v1.post import LogMetricValues
+from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric.get import (
+    RetrieveMetricValues,
+)
 
 
 class CustomMetricsApi(
@@ -24,4 +26,5 @@ class CustomMetricsApi(
 
     Do not edit the class manually.
     """
+
     pass
diff --git a/domino/_impl/custommetrics/configuration.py b/domino/_impl/custommetrics/configuration.py
index 59086c30..a223e376 100644
--- a/domino/_impl/custommetrics/configuration.py
+++ b/domino/_impl/custommetrics/configuration.py
@@ -1,29 +1,38 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import copy
 import logging
 import multiprocessing
 import sys
+from http import client as http_client
+
 import urllib3
 
-from http import client as http_client
 from domino._impl.custommetrics.exceptions import ApiValueError
 
-
 JSON_SCHEMA_VALIDATION_KEYWORDS = {
-    'multipleOf', 'maximum', 'exclusiveMaximum',
-    'minimum', 'exclusiveMinimum', 'maxLength',
-    'minLength', 'pattern', 'maxItems', 'minItems',
-    'uniqueItems', 'maxProperties', 'minProperties',
+    "multipleOf",
+    "maximum",
+    "exclusiveMaximum",
+    "minimum",
+    "exclusiveMinimum",
+    "maxLength",
+    "minLength",
+    "pattern",
+    "maxItems",
+    "minItems",
+    "uniqueItems",
+    "maxProperties",
+    "minProperties",
 }
 
 
@@ -81,16 +90,21 @@ class Configuration(object):
 
     _default = None
 
-    def __init__(self, host=None,
-                 api_key=None, api_key_prefix=None,
-                 username=None, password=None,
-                 discard_unknown_keys=False,
-                 disabled_client_side_validations="",
-                 server_index=None, server_variables=None,
-                 server_operation_index=None, server_operation_variables=None,
-                 ):
-        """Constructor
-        """
+    def __init__(
+        self,
+        host=None,
+        api_key=None,
+        api_key_prefix=None,
+        username=None,
+        password=None,
+        discard_unknown_keys=False,
+        disabled_client_side_validations="",
+        server_index=None,
+        server_variables=None,
+        server_operation_index=None,
+        server_operation_variables=None,
+    ):
+        """Constructor"""
         self._base_path = "http://localhost" if host is None else host
         """Default Base url
         """
@@ -132,7 +146,7 @@ def __init__(self, host=None,
         """
         self.logger["package_logger"] = logging.getLogger("domino._impl.custommetrics")
         self.logger["urllib3_logger"] = logging.getLogger("urllib3")
-        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
+        self.logger_format = "%(asctime)s %(levelname)s %(message)s"
         """Log format
         """
         self.logger_stream_handler = None
@@ -180,7 +194,7 @@ def __init__(self, host=None,
         self.proxy_headers = None
         """Proxy headers
         """
-        self.safe_chars_for_path_param = ''
+        self.safe_chars_for_path_param = ""
         """Safe chars for path_param
         """
         self.retries = None
@@ -197,7 +211,7 @@ def __deepcopy__(self, memo):
         result = cls.__new__(cls)
         memo[id(self)] = result
         for k, v in self.__dict__.items():
-            if k not in ('logger', 'logger_file_handler'):
+            if k not in ("logger", "logger_file_handler"):
                 setattr(result, k, copy.deepcopy(v, memo))
         # shallow copy of loggers
         result.logger = copy.copy(self.logger)
@@ -208,12 +222,11 @@ def __deepcopy__(self, memo):
 
     def __setattr__(self, name, value):
         object.__setattr__(self, name, value)
-        if name == 'disabled_client_side_validations':
-            s = set(filter(None, value.split(',')))
+        if name == "disabled_client_side_validations":
+            s = set(filter(None, value.split(",")))
             for v in s:
                 if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
-                    raise ApiValueError(
-                        "Invalid keyword: '{0}''".format(v))
+                    raise ApiValueError("Invalid keyword: '{0}''".format(v))
             self._disabled_client_side_validations = s
 
     @classmethod
@@ -335,7 +348,9 @@ def get_api_key_with_prefix(self, identifier, alias=None):
         """
         if self.refresh_api_key_hook is not None:
             self.refresh_api_key_hook(self)
-        key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
+        key = self.api_key.get(
+            identifier, self.api_key.get(alias) if alias is not None else None
+        )
         if key:
             prefix = self.api_key_prefix.get(identifier)
             if prefix:
@@ -354,9 +369,9 @@ def get_basic_auth_token(self):
         password = ""
         if self.password is not None:
             password = self.password
-        return urllib3.util.make_headers(
-            basic_auth=username + ':' + password
-        ).get('authorization')
+        return urllib3.util.make_headers(basic_auth=username + ":" + password).get(
+            "authorization"
+        )
 
     def auth_settings(self):
         """Gets Auth Settings dict for api client.
@@ -371,12 +386,13 @@ def to_debug_report(self):
 
         :return: The report for debugging.
         """
-        return "Python SDK Debug Report:\n"\
-               "OS: {env}\n"\
-               "Python Version: {pyversion}\n"\
-               "Version of the API: 5.3.0\n"\
-               "SDK Package Version: 1.0.0".\
-               format(env=sys.platform, pyversion=sys.version)
+        return (
+            "Python SDK Debug Report:\n"
+            "OS: {env}\n"
+            "Python Version: {pyversion}\n"
+            "Version of the API: 5.3.0\n"
+            "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version)
+        )
 
     def get_host_settings(self):
         """Gets an array of host settings
@@ -385,8 +401,8 @@ def get_host_settings(self):
         """
         return [
             {
-                'url': "",
-                'description': "No description provided",
+                "url": "",
+                "description": "No description provided",
             }
         ]
 
@@ -408,22 +424,22 @@ def get_host_from_settings(self, index, variables=None, servers=None):
         except IndexError:
             raise ValueError(
                 "Invalid index {0} when selecting the host settings. "
-                "Must be less than {1}".format(index, len(servers)))
+                "Must be less than {1}".format(index, len(servers))
+            )
 
-        url = server['url']
+        url = server["url"]
 
         # go through variables and replace placeholders
-        for variable_name, variable in server.get('variables', {}).items():
-            used_value = variables.get(
-                variable_name, variable['default_value'])
+        for variable_name, variable in server.get("variables", {}).items():
+            used_value = variables.get(variable_name, variable["default_value"])
 
-            if 'enum_values' in variable \
-                    and used_value not in variable['enum_values']:
+            if "enum_values" in variable and used_value not in variable["enum_values"]:
                 raise ValueError(
                     "The variable `{0}` in the host URL has invalid value "
                     "{1}. Must be {2}.".format(
-                        variable_name, variables[variable_name],
-                        variable['enum_values']))
+                        variable_name, variables[variable_name], variable["enum_values"]
+                    )
+                )
 
             url = url.replace("{" + variable_name + "}", used_value)
 
@@ -432,7 +448,9 @@ def get_host_from_settings(self, index, variables=None, servers=None):
     @property
     def host(self):
         """Return generated host."""
-        return self.get_host_from_settings(self.server_index, variables=self.server_variables)
+        return self.get_host_from_settings(
+            self.server_index, variables=self.server_variables
+        )
 
     @host.setter
     def host(self, value):
diff --git a/domino/_impl/custommetrics/exceptions.py b/domino/_impl/custommetrics/exceptions.py
index 3e0d8415..7530a8ae 100644
--- a/domino/_impl/custommetrics/exceptions.py
+++ b/domino/_impl/custommetrics/exceptions.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 
@@ -15,9 +15,8 @@ class OpenApiException(Exception):
 
 
 class ApiTypeError(OpenApiException, TypeError):
-    def __init__(self, msg, path_to_item=None, valid_classes=None,
-                 key_type=None):
-        """ Raises an exception for TypeErrors
+    def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
+        """Raises an exception for TypeErrors
 
         Args:
             msg (str): the exception message
@@ -99,7 +98,12 @@ def __init__(self, msg, path_to_item=None):
 
 class ApiException(OpenApiException):
 
-    def __init__(self, status=None, reason=None, api_response: 'domino._impl.custommetrics.api_client.ApiResponse' = None):
+    def __init__(
+        self,
+        status=None,
+        reason=None,
+        api_response: "domino._impl.custommetrics.api_client.ApiResponse" = None,
+    ):
         if api_response:
             self.status = api_response.response.status
             self.reason = api_response.response.reason
@@ -113,11 +117,9 @@ def __init__(self, status=None, reason=None, api_response: 'domino._impl.customm
 
     def __str__(self):
         """Custom error messages for exception"""
-        error_message = "({0})\n"\
-                        "Reason: {1}\n".format(self.status, self.reason)
+        error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason)
         if self.headers:
-            error_message += "HTTP response headers: {0}\n".format(
-                self.headers)
+            error_message += "HTTP response headers: {0}\n".format(self.headers)
 
         if self.body:
             error_message += "HTTP response body: {0}\n".format(self.body)
diff --git a/domino/_impl/custommetrics/model/failure_envelope_v1.py b/domino/_impl/custommetrics/model/failure_envelope_v1.py
index f2d8d63b..122c7fef 100644
--- a/domino/_impl/custommetrics/model/failure_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/failure_envelope_v1.py
@@ -1,31 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
 
-class FailureEnvelopeV1(
-    schemas.DictSchema
-):
+class FailureEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -41,18 +39,29 @@ class MetaOapg:
         class properties:
             requestId = schemas.StrSchema
 
-            class errors(
-                schemas.ListSchema
-            ):
+            class errors(schemas.ListSchema):
 
                 class MetaOapg:
                     items = schemas.StrSchema
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
+                    arg: typing.Union[
+                        typing.Tuple[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                        typing.List[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'errors':
+                ) -> "errors":
                     return super().__new__(
                         cls,
                         arg,
@@ -61,6 +70,7 @@ def __new__(
 
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "requestId": requestId,
                 "errors": errors,
@@ -70,38 +80,91 @@ def __getitem__(self, i: int) -> MetaOapg.items:
     errors: MetaOapg.properties.errors
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["errors"]
+    ) -> MetaOapg.properties.errors: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "errors",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["errors"]
+    ) -> MetaOapg.properties.errors: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "errors",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        requestId: typing.Union[MetaOapg.properties.requestId, str, ],
-        errors: typing.Union[MetaOapg.properties.errors, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        requestId: typing.Union[
+            MetaOapg.properties.requestId,
+            str,
+        ],
+        errors: typing.Union[
+            MetaOapg.properties.errors,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'FailureEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "FailureEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/failure_envelope_v1.pyi b/domino/_impl/custommetrics/model/failure_envelope_v1.pyi
index 4bf16844..37ffdfe4 100644
--- a/domino/_impl/custommetrics/model/failure_envelope_v1.pyi
+++ b/domino/_impl/custommetrics/model/failure_envelope_v1.pyi
@@ -1,112 +1,162 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class FailureEnvelopeV1(
-    schemas.DictSchema
-):
+class FailureEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "requestId",
             "errors",
         }
-        
+
         class properties:
             requestId = schemas.StrSchema
-            
-            
-            class errors(
-                schemas.ListSchema
-            ):
-            
-            
+
+            class errors(schemas.ListSchema):
                 class MetaOapg:
                     items = schemas.StrSchema
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
+                    arg: typing.Union[
+                        typing.Tuple[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                        typing.List[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'errors':
+                ) -> "errors":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "requestId": requestId,
                 "errors": errors,
             }
-    
+
     requestId: MetaOapg.properties.requestId
     errors: MetaOapg.properties.errors
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["errors"]
+    ) -> MetaOapg.properties.errors: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "errors",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["errors"]) -> MetaOapg.properties.errors: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["errors"]
+    ) -> MetaOapg.properties.errors: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "errors", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "errors",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        requestId: typing.Union[MetaOapg.properties.requestId, str, ],
-        errors: typing.Union[MetaOapg.properties.errors, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        requestId: typing.Union[
+            MetaOapg.properties.requestId,
+            str,
+        ],
+        errors: typing.Union[
+            MetaOapg.properties.errors,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'FailureEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "FailureEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
index f924b41a..0adf6702 100644
--- a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.py
@@ -1,31 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
 
-class InvalidBodyEnvelopeV1(
-    schemas.DictSchema
-):
+class InvalidBodyEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -46,31 +44,62 @@ class properties:
     message: MetaOapg.properties.message
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["message"]
+    ) -> MetaOapg.properties.message: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
+    def __getitem__(
+        self, name: typing.Union[typing_extensions.Literal["message",], str]
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["message"]
+    ) -> MetaOapg.properties.message: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
 
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
+    def get_item_oapg(
+        self, name: typing.Union[typing_extensions.Literal["message",], str]
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        message: typing.Union[MetaOapg.properties.message, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        message: typing.Union[
+            MetaOapg.properties.message,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'InvalidBodyEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "InvalidBodyEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.pyi b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.pyi
index 72bdcfd6..4c32fb54 100644
--- a/domino/_impl/custommetrics/model/invalid_body_envelope_v1.pyi
+++ b/domino/_impl/custommetrics/model/invalid_body_envelope_v1.pyi
@@ -1,79 +1,100 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class InvalidBodyEnvelopeV1(
-    schemas.DictSchema
-):
+class InvalidBodyEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "message",
         }
-        
+
         class properties:
             message = schemas.StrSchema
             __annotations__ = {
                 "message": message,
             }
-    
+
     message: MetaOapg.properties.message
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["message"]
+    ) -> MetaOapg.properties.message: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
+    def __getitem__(
+        self, name: typing.Union[typing_extensions.Literal["message",], str]
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["message"]
+    ) -> MetaOapg.properties.message: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["message", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing.Union[typing_extensions.Literal["message",], str]
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        message: typing.Union[MetaOapg.properties.message, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        message: typing.Union[
+            MetaOapg.properties.message,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'InvalidBodyEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "InvalidBodyEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metadata_v1.py b/domino/_impl/custommetrics/model/metadata_v1.py
index be027e44..38a04bbe 100644
--- a/domino/_impl/custommetrics/model/metadata_v1.py
+++ b/domino/_impl/custommetrics/model/metadata_v1.py
@@ -1,31 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
 
-class MetadataV1(
-    schemas.DictSchema
-):
+class MetadataV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -41,18 +39,29 @@ class MetaOapg:
         class properties:
             requestId = schemas.StrSchema
 
-            class notices(
-                schemas.ListSchema
-            ):
+            class notices(schemas.ListSchema):
 
                 class MetaOapg:
                     items = schemas.StrSchema
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
+                    arg: typing.Union[
+                        typing.Tuple[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                        typing.List[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'notices':
+                ) -> "notices":
                     return super().__new__(
                         cls,
                         arg,
@@ -61,6 +70,7 @@ def __new__(
 
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "requestId": requestId,
                 "notices": notices,
@@ -70,38 +80,91 @@ def __getitem__(self, i: int) -> MetaOapg.items:
     requestId: MetaOapg.properties.requestId
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["notices"]
+    ) -> MetaOapg.properties.notices: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "notices",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["notices"]
+    ) -> MetaOapg.properties.notices: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "notices",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        notices: typing.Union[MetaOapg.properties.notices, list, tuple, ],
-        requestId: typing.Union[MetaOapg.properties.requestId, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        notices: typing.Union[
+            MetaOapg.properties.notices,
+            list,
+            tuple,
+        ],
+        requestId: typing.Union[
+            MetaOapg.properties.requestId,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetadataV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetadataV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metadata_v1.pyi b/domino/_impl/custommetrics/model/metadata_v1.pyi
index 7ad772c5..8bea301d 100644
--- a/domino/_impl/custommetrics/model/metadata_v1.pyi
+++ b/domino/_impl/custommetrics/model/metadata_v1.pyi
@@ -1,112 +1,162 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class MetadataV1(
-    schemas.DictSchema
-):
+class MetadataV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "notices",
             "requestId",
         }
-        
+
         class properties:
             requestId = schemas.StrSchema
-            
-            
-            class notices(
-                schemas.ListSchema
-            ):
-            
-            
+
+            class notices(schemas.ListSchema):
                 class MetaOapg:
                     items = schemas.StrSchema
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
+                    arg: typing.Union[
+                        typing.Tuple[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                        typing.List[
+                            typing.Union[
+                                MetaOapg.items,
+                                str,
+                            ]
+                        ],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'notices':
+                ) -> "notices":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
+
                 def __getitem__(self, i: int) -> MetaOapg.items:
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "requestId": requestId,
                 "notices": notices,
             }
-    
+
     notices: MetaOapg.properties.notices
     requestId: MetaOapg.properties.requestId
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["notices"]
+    ) -> MetaOapg.properties.notices: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "notices",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["requestId"]) -> MetaOapg.properties.requestId: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["requestId"]
+    ) -> MetaOapg.properties.requestId: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["notices"]) -> MetaOapg.properties.notices: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["notices"]
+    ) -> MetaOapg.properties.notices: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requestId", "notices", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "requestId",
+                "notices",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        notices: typing.Union[MetaOapg.properties.notices, list, tuple, ],
-        requestId: typing.Union[MetaOapg.properties.requestId, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        notices: typing.Union[
+            MetaOapg.properties.notices,
+            list,
+            tuple,
+        ],
+        requestId: typing.Union[
+            MetaOapg.properties.requestId,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetadataV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetadataV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_alert_request_v1.py b/domino/_impl/custommetrics/model/metric_alert_request_v1.py
index 22a8c0bc..11b36be6 100644
--- a/domino/_impl/custommetrics/model/metric_alert_request_v1.py
+++ b/domino/_impl/custommetrics/model/metric_alert_request_v1.py
@@ -1,32 +1,31 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from .target_range_v1 import TargetRangeV1
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
+from .target_range_v1 import TargetRangeV1
+
 
-class MetricAlertRequestV1(
-    schemas.DictSchema
-):
+class MetricAlertRequestV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -47,8 +46,9 @@ class properties:
             value = schemas.NumberSchema
 
             @staticmethod
-            def targetRange() -> typing.Type['TargetRangeV1']:
+            def targetRange() -> typing.Type["TargetRangeV1"]:
                 return TargetRangeV1
+
             description = schemas.StrSchema
             __annotations__ = {
                 "modelMonitoringId": modelMonitoringId,
@@ -60,63 +60,140 @@ def targetRange() -> typing.Type['TargetRangeV1']:
 
     metric: MetaOapg.properties.metric
     modelMonitoringId: MetaOapg.properties.modelMonitoringId
-    targetRange: 'TargetRangeV1'
+    targetRange: "TargetRangeV1"
     value: MetaOapg.properties.value
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["targetRange"]
+    ) -> "TargetRangeV1": ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["description"]
+    ) -> MetaOapg.properties.description: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "targetRange",
+                "description",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["targetRange"]
+    ) -> "TargetRangeV1": ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["description"]
+    ) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "targetRange",
+                "description",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        metric: typing.Union[MetaOapg.properties.metric, str, ],
-        modelMonitoringId: typing.Union[MetaOapg.properties.modelMonitoringId, str, ],
-        targetRange: 'TargetRangeV1',
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        description: typing.Union[MetaOapg.properties.description, str, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        metric: typing.Union[
+            MetaOapg.properties.metric,
+            str,
+        ],
+        modelMonitoringId: typing.Union[
+            MetaOapg.properties.modelMonitoringId,
+            str,
+        ],
+        targetRange: "TargetRangeV1",
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        description: typing.Union[
+            MetaOapg.properties.description, str, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricAlertRequestV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricAlertRequestV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_alert_request_v1.pyi b/domino/_impl/custommetrics/model/metric_alert_request_v1.pyi
index 050131fa..3335cee1 100644
--- a/domino/_impl/custommetrics/model/metric_alert_request_v1.pyi
+++ b/domino/_impl/custommetrics/model/metric_alert_request_v1.pyi
@@ -1,38 +1,34 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class MetricAlertRequestV1(
-    schemas.DictSchema
-):
+class MetricAlertRequestV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "metric",
@@ -40,14 +36,14 @@ class MetricAlertRequestV1(
             "targetRange",
             "value",
         }
-        
+
         class properties:
             modelMonitoringId = schemas.StrSchema
             metric = schemas.StrSchema
             value = schemas.NumberSchema
-        
+
             @staticmethod
-            def targetRange() -> typing.Type['TargetRangeV1']:
+            def targetRange() -> typing.Type["TargetRangeV1"]:
                 return TargetRangeV1
             description = schemas.StrSchema
             __annotations__ = {
@@ -57,68 +53,131 @@ class MetricAlertRequestV1(
                 "targetRange": targetRange,
                 "description": description,
             }
-    
+
     metric: MetaOapg.properties.metric
     modelMonitoringId: MetaOapg.properties.modelMonitoringId
-    targetRange: 'TargetRangeV1'
+    targetRange: "TargetRangeV1"
     value: MetaOapg.properties.value
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["targetRange"]
+    ) -> "TargetRangeV1": ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["description"]
+    ) -> MetaOapg.properties.description: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "targetRange",
+                "description",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["targetRange"]) -> 'TargetRangeV1': ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["targetRange"]
+    ) -> "TargetRangeV1": ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["description"]
+    ) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "targetRange", "description", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "targetRange",
+                "description",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        metric: typing.Union[MetaOapg.properties.metric, str, ],
-        modelMonitoringId: typing.Union[MetaOapg.properties.modelMonitoringId, str, ],
-        targetRange: 'TargetRangeV1',
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        description: typing.Union[MetaOapg.properties.description, str, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        metric: typing.Union[
+            MetaOapg.properties.metric,
+            str,
+        ],
+        modelMonitoringId: typing.Union[
+            MetaOapg.properties.modelMonitoringId,
+            str,
+        ],
+        targetRange: "TargetRangeV1",
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        description: typing.Union[
+            MetaOapg.properties.description, str, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricAlertRequestV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricAlertRequestV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_tag_v1.py b/domino/_impl/custommetrics/model/metric_tag_v1.py
index 93bc19a4..986e35aa 100644
--- a/domino/_impl/custommetrics/model/metric_tag_v1.py
+++ b/domino/_impl/custommetrics/model/metric_tag_v1.py
@@ -1,31 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
 
-class MetricTagV1(
-    schemas.DictSchema
-):
+class MetricTagV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -50,38 +48,90 @@ class properties:
     key: MetaOapg.properties.key
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["key"]
+    ) -> MetaOapg.properties.key: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "key",
+                "value",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["key"]
+    ) -> MetaOapg.properties.key: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "key",
+                "value",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        value: typing.Union[MetaOapg.properties.value, str, ],
-        key: typing.Union[MetaOapg.properties.key, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            str,
+        ],
+        key: typing.Union[
+            MetaOapg.properties.key,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricTagV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricTagV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_tag_v1.pyi b/domino/_impl/custommetrics/model/metric_tag_v1.pyi
index a954d441..5f404e19 100644
--- a/domino/_impl/custommetrics/model/metric_tag_v1.pyi
+++ b/domino/_impl/custommetrics/model/metric_tag_v1.pyi
@@ -1,44 +1,40 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class MetricTagV1(
-    schemas.DictSchema
-):
+class MetricTagV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "value",
             "key",
         }
-        
+
         class properties:
             key = schemas.StrSchema
             value = schemas.StrSchema
@@ -46,45 +42,89 @@ class MetricTagV1(
                 "key": key,
                 "value": value,
             }
-    
+
     value: MetaOapg.properties.value
     key: MetaOapg.properties.key
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["key"]
+    ) -> MetaOapg.properties.key: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "key",
+                "value",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["key"]
+    ) -> MetaOapg.properties.key: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["key", "value", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "key",
+                "value",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        value: typing.Union[MetaOapg.properties.value, str, ],
-        key: typing.Union[MetaOapg.properties.key, str, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            str,
+        ],
+        key: typing.Union[
+            MetaOapg.properties.key,
+            str,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricTagV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricTagV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_value_v1.py b/domino/_impl/custommetrics/model/metric_value_v1.py
index ef8cc3fe..8b6ddc2d 100644
--- a/domino/_impl/custommetrics/model/metric_value_v1.py
+++ b/domino/_impl/custommetrics/model/metric_value_v1.py
@@ -1,32 +1,31 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from .metric_tag_v1 import MetricTagV1
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
+from .metric_tag_v1 import MetricTagV1
+
 
-class MetricValueV1(
-    schemas.DictSchema
-):
+class MetricValueV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -44,29 +43,30 @@ class properties:
             referenceTimestamp = schemas.StrSchema
             value = schemas.NumberSchema
 
-            class tags(
-                schemas.ListSchema
-            ):
+            class tags(schemas.ListSchema):
 
                 class MetaOapg:
 
                     @staticmethod
-                    def items() -> typing.Type['MetricTagV1']:
+                    def items() -> typing.Type["MetricTagV1"]:
                         return MetricTagV1
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricTagV1"], typing.List["MetricTagV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'tags':
+                ) -> "tags":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
 
-                def __getitem__(self, i: int) -> 'MetricTagV1':
+                def __getitem__(self, i: int) -> "MetricTagV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "referenceTimestamp": referenceTimestamp,
                 "value": value,
@@ -78,45 +78,109 @@ def __getitem__(self, i: int) -> 'MetricTagV1':
     tags: MetaOapg.properties.tags
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "referenceTimestamp",
+                "value",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "referenceTimestamp",
+                "value",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        referenceTimestamp: typing.Union[MetaOapg.properties.referenceTimestamp, str, ],
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        tags: typing.Union[MetaOapg.properties.tags, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        referenceTimestamp: typing.Union[
+            MetaOapg.properties.referenceTimestamp,
+            str,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        tags: typing.Union[
+            MetaOapg.properties.tags,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricValueV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricValueV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_value_v1.pyi b/domino/_impl/custommetrics/model/metric_value_v1.pyi
index d488a187..73579804 100644
--- a/domino/_impl/custommetrics/model/metric_value_v1.pyi
+++ b/domino/_impl/custommetrics/model/metric_value_v1.pyi
@@ -1,126 +1,173 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class MetricValueV1(
-    schemas.DictSchema
-):
+class MetricValueV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "referenceTimestamp",
             "value",
             "tags",
         }
-        
+
         class properties:
             referenceTimestamp = schemas.StrSchema
             value = schemas.NumberSchema
-            
-            
-            class tags(
-                schemas.ListSchema
-            ):
-            
-            
+
+            class tags(schemas.ListSchema):
                 class MetaOapg:
-                    
                     @staticmethod
-                    def items() -> typing.Type['MetricTagV1']:
+                    def items() -> typing.Type["MetricTagV1"]:
                         return MetricTagV1
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricTagV1"], typing.List["MetricTagV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'tags':
+                ) -> "tags":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
-                def __getitem__(self, i: int) -> 'MetricTagV1':
+
+                def __getitem__(self, i: int) -> "MetricTagV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "referenceTimestamp": referenceTimestamp,
                 "value": value,
                 "tags": tags,
             }
-    
+
     referenceTimestamp: MetaOapg.properties.referenceTimestamp
     value: MetaOapg.properties.value
     tags: MetaOapg.properties.tags
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "referenceTimestamp",
+                "value",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["referenceTimestamp", "value", "tags", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "referenceTimestamp",
+                "value",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        referenceTimestamp: typing.Union[MetaOapg.properties.referenceTimestamp, str, ],
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        tags: typing.Union[MetaOapg.properties.tags, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        referenceTimestamp: typing.Union[
+            MetaOapg.properties.referenceTimestamp,
+            str,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        tags: typing.Union[
+            MetaOapg.properties.tags,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricValueV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricValueV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_values_envelope_v1.py b/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
index 2f410b9c..2a2e4689 100644
--- a/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/metric_values_envelope_v1.py
@@ -1,33 +1,32 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from .metadata_v1 import MetadataV1
-from .metric_value_v1 import MetricValueV1
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
+from .metadata_v1 import MetadataV1
+from .metric_value_v1 import MetricValueV1
+
 
-class MetricValuesEnvelopeV1(
-    schemas.DictSchema
-):
+class MetricValuesEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -42,74 +41,125 @@ class MetaOapg:
 
         class properties:
 
-            class metricValues(
-                schemas.ListSchema
-            ):
+            class metricValues(schemas.ListSchema):
 
                 class MetaOapg:
 
                     @staticmethod
-                    def items() -> typing.Type['MetricValueV1']:
+                    def items() -> typing.Type["MetricValueV1"]:
                         return MetricValueV1
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricValueV1'], typing.List['MetricValueV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricValueV1"], typing.List["MetricValueV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'metricValues':
+                ) -> "metricValues":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
 
-                def __getitem__(self, i: int) -> 'MetricValueV1':
+                def __getitem__(self, i: int) -> "MetricValueV1":
                     return super().__getitem__(i)
 
             @staticmethod
-            def metadata() -> typing.Type['MetadataV1']:
+            def metadata() -> typing.Type["MetadataV1"]:
                 return MetadataV1
+
             __annotations__ = {
                 "metricValues": metricValues,
                 "metadata": metadata,
             }
 
-    metadata: 'MetadataV1'
+    metadata: "MetadataV1"
     metricValues: MetaOapg.properties.metricValues
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["metricValues"]
+    ) -> MetaOapg.properties.metricValues: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["metadata"]
+    ) -> "MetadataV1": ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "metricValues",
+                "metadata",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metricValues"]
+    ) -> MetaOapg.properties.metricValues: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metadata"]
+    ) -> "MetadataV1": ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "metricValues",
+                "metadata",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        metadata: 'MetadataV1',
-        metricValues: typing.Union[MetaOapg.properties.metricValues, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        metadata: "MetadataV1",
+        metricValues: typing.Union[
+            MetaOapg.properties.metricValues,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricValuesEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricValuesEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/metric_values_envelope_v1.pyi b/domino/_impl/custommetrics/model/metric_values_envelope_v1.pyi
index 19f612af..c369341f 100644
--- a/domino/_impl/custommetrics/model/metric_values_envelope_v1.pyi
+++ b/domino/_impl/custommetrics/model/metric_values_envelope_v1.pyi
@@ -1,118 +1,151 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class MetricValuesEnvelopeV1(
-    schemas.DictSchema
-):
+class MetricValuesEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "metadata",
             "metricValues",
         }
-        
+
         class properties:
-            
-            
-            class metricValues(
-                schemas.ListSchema
-            ):
-            
-            
+            class metricValues(schemas.ListSchema):
                 class MetaOapg:
-                    
                     @staticmethod
-                    def items() -> typing.Type['MetricValueV1']:
+                    def items() -> typing.Type["MetricValueV1"]:
                         return MetricValueV1
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricValueV1'], typing.List['MetricValueV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricValueV1"], typing.List["MetricValueV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'metricValues':
+                ) -> "metricValues":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
-                def __getitem__(self, i: int) -> 'MetricValueV1':
+
+                def __getitem__(self, i: int) -> "MetricValueV1":
                     return super().__getitem__(i)
-        
+
             @staticmethod
-            def metadata() -> typing.Type['MetadataV1']:
+            def metadata() -> typing.Type["MetadataV1"]:
                 return MetadataV1
             __annotations__ = {
                 "metricValues": metricValues,
                 "metadata": metadata,
             }
-    
-    metadata: 'MetadataV1'
+
+    metadata: "MetadataV1"
     metricValues: MetaOapg.properties.metricValues
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["metricValues"]
+    ) -> MetaOapg.properties.metricValues: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["metadata"]
+    ) -> "MetadataV1": ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "metricValues",
+                "metadata",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metricValues"]) -> MetaOapg.properties.metricValues: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metricValues"]
+    ) -> MetaOapg.properties.metricValues: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> 'MetadataV1': ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metadata"]
+    ) -> "MetadataV1": ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["metricValues", "metadata", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "metricValues",
+                "metadata",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        metadata: 'MetadataV1',
-        metricValues: typing.Union[MetaOapg.properties.metricValues, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        metadata: "MetadataV1",
+        metricValues: typing.Union[
+            MetaOapg.properties.metricValues,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'MetricValuesEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "MetricValuesEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/new_metric_value_v1.py b/domino/_impl/custommetrics/model/new_metric_value_v1.py
index de20f9f4..4c283169 100644
--- a/domino/_impl/custommetrics/model/new_metric_value_v1.py
+++ b/domino/_impl/custommetrics/model/new_metric_value_v1.py
@@ -1,32 +1,31 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from .metric_tag_v1 import MetricTagV1
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
+from .metric_tag_v1 import MetricTagV1
+
 
-class NewMetricValueV1(
-    schemas.DictSchema
-):
+class NewMetricValueV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -47,29 +46,30 @@ class properties:
             value = schemas.NumberSchema
             referenceTimestamp = schemas.StrSchema
 
-            class tags(
-                schemas.ListSchema
-            ):
+            class tags(schemas.ListSchema):
 
                 class MetaOapg:
 
                     @staticmethod
-                    def items() -> typing.Type['MetricTagV1']:
+                    def items() -> typing.Type["MetricTagV1"]:
                         return MetricTagV1
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricTagV1"], typing.List["MetricTagV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'tags':
+                ) -> "tags":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
 
-                def __getitem__(self, i: int) -> 'MetricTagV1':
+                def __getitem__(self, i: int) -> "MetricTagV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "modelMonitoringId": modelMonitoringId,
                 "metric": metric,
@@ -84,59 +84,139 @@ def __getitem__(self, i: int) -> 'MetricTagV1':
     value: MetaOapg.properties.value
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "referenceTimestamp",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> typing.Union[MetaOapg.properties.tags, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> typing.Union[MetaOapg.properties.tags, schemas.Unset]: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "referenceTimestamp",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        referenceTimestamp: typing.Union[MetaOapg.properties.referenceTimestamp, str, ],
-        metric: typing.Union[MetaOapg.properties.metric, str, ],
-        modelMonitoringId: typing.Union[MetaOapg.properties.modelMonitoringId, str, ],
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        tags: typing.Union[MetaOapg.properties.tags, list, tuple, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        referenceTimestamp: typing.Union[
+            MetaOapg.properties.referenceTimestamp,
+            str,
+        ],
+        metric: typing.Union[
+            MetaOapg.properties.metric,
+            str,
+        ],
+        modelMonitoringId: typing.Union[
+            MetaOapg.properties.modelMonitoringId,
+            str,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        tags: typing.Union[
+            MetaOapg.properties.tags, list, tuple, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'NewMetricValueV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "NewMetricValueV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/new_metric_value_v1.pyi b/domino/_impl/custommetrics/model/new_metric_value_v1.pyi
index 7d00e74e..f3a3e53c 100644
--- a/domino/_impl/custommetrics/model/new_metric_value_v1.pyi
+++ b/domino/_impl/custommetrics/model/new_metric_value_v1.pyi
@@ -1,38 +1,34 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class NewMetricValueV1(
-    schemas.DictSchema
-):
+class NewMetricValueV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "referenceTimestamp",
@@ -40,38 +36,35 @@ class NewMetricValueV1(
             "modelMonitoringId",
             "value",
         }
-        
+
         class properties:
             modelMonitoringId = schemas.StrSchema
             metric = schemas.StrSchema
             value = schemas.NumberSchema
             referenceTimestamp = schemas.StrSchema
-            
-            
-            class tags(
-                schemas.ListSchema
-            ):
-            
-            
+
+            class tags(schemas.ListSchema):
                 class MetaOapg:
-                    
                     @staticmethod
-                    def items() -> typing.Type['MetricTagV1']:
+                    def items() -> typing.Type["MetricTagV1"]:
                         return MetricTagV1
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['MetricTagV1'], typing.List['MetricTagV1']],
+                    arg: typing.Union[
+                        typing.Tuple["MetricTagV1"], typing.List["MetricTagV1"]
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'tags':
+                ) -> "tags":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
-                def __getitem__(self, i: int) -> 'MetricTagV1':
+
+                def __getitem__(self, i: int) -> "MetricTagV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "modelMonitoringId": modelMonitoringId,
                 "metric": metric,
@@ -79,68 +72,134 @@ class NewMetricValueV1(
                 "referenceTimestamp": referenceTimestamp,
                 "tags": tags,
             }
-    
+
     referenceTimestamp: MetaOapg.properties.referenceTimestamp
     metric: MetaOapg.properties.metric
     modelMonitoringId: MetaOapg.properties.modelMonitoringId
     value: MetaOapg.properties.value
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["tags"]) -> MetaOapg.properties.tags: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> MetaOapg.properties.tags: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "referenceTimestamp",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["modelMonitoringId"]) -> MetaOapg.properties.modelMonitoringId: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["modelMonitoringId"]
+    ) -> MetaOapg.properties.modelMonitoringId: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["metric"]) -> MetaOapg.properties.metric: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["metric"]
+    ) -> MetaOapg.properties.metric: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["value"]) -> MetaOapg.properties.value: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["value"]
+    ) -> MetaOapg.properties.value: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["referenceTimestamp"]) -> MetaOapg.properties.referenceTimestamp: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["referenceTimestamp"]
+    ) -> MetaOapg.properties.referenceTimestamp: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["tags"]) -> typing.Union[MetaOapg.properties.tags, schemas.Unset]: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["tags"]
+    ) -> typing.Union[MetaOapg.properties.tags, schemas.Unset]: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["modelMonitoringId", "metric", "value", "referenceTimestamp", "tags", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "modelMonitoringId",
+                "metric",
+                "value",
+                "referenceTimestamp",
+                "tags",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        referenceTimestamp: typing.Union[MetaOapg.properties.referenceTimestamp, str, ],
-        metric: typing.Union[MetaOapg.properties.metric, str, ],
-        modelMonitoringId: typing.Union[MetaOapg.properties.modelMonitoringId, str, ],
-        value: typing.Union[MetaOapg.properties.value, decimal.Decimal, int, float, ],
-        tags: typing.Union[MetaOapg.properties.tags, list, tuple, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        referenceTimestamp: typing.Union[
+            MetaOapg.properties.referenceTimestamp,
+            str,
+        ],
+        metric: typing.Union[
+            MetaOapg.properties.metric,
+            str,
+        ],
+        modelMonitoringId: typing.Union[
+            MetaOapg.properties.modelMonitoringId,
+            str,
+        ],
+        value: typing.Union[
+            MetaOapg.properties.value,
+            decimal.Decimal,
+            int,
+            float,
+        ],
+        tags: typing.Union[
+            MetaOapg.properties.tags, list, tuple, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'NewMetricValueV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "NewMetricValueV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
index 4dfa62b3..e202d570 100644
--- a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
+++ b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.py
@@ -1,32 +1,31 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from .new_metric_value_v1 import NewMetricValueV1
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
+from .new_metric_value_v1 import NewMetricValueV1
+
 
-class NewMetricValuesEnvelopeV1(
-    schemas.DictSchema
-):
+class NewMetricValuesEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -40,29 +39,31 @@ class MetaOapg:
 
         class properties:
 
-            class newMetricValues(
-                schemas.ListSchema
-            ):
+            class newMetricValues(schemas.ListSchema):
 
                 class MetaOapg:
 
                     @staticmethod
-                    def items() -> typing.Type['NewMetricValueV1']:
+                    def items() -> typing.Type["NewMetricValueV1"]:
                         return NewMetricValueV1
 
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['NewMetricValueV1'], typing.List['NewMetricValueV1']],
+                    arg: typing.Union[
+                        typing.Tuple["NewMetricValueV1"],
+                        typing.List["NewMetricValueV1"],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'newMetricValues':
+                ) -> "newMetricValues":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
 
-                def __getitem__(self, i: int) -> 'NewMetricValueV1':
+                def __getitem__(self, i: int) -> "NewMetricValueV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "newMetricValues": newMetricValues,
             }
@@ -70,31 +71,63 @@ def __getitem__(self, i: int) -> 'NewMetricValueV1':
     newMetricValues: MetaOapg.properties.newMetricValues
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["newMetricValues"]
+    ) -> MetaOapg.properties.newMetricValues: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
+    def __getitem__(
+        self, name: typing.Union[typing_extensions.Literal["newMetricValues",], str]
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["newMetricValues"]
+    ) -> MetaOapg.properties.newMetricValues: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
 
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
+    def get_item_oapg(
+        self, name: typing.Union[typing_extensions.Literal["newMetricValues",], str]
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        newMetricValues: typing.Union[MetaOapg.properties.newMetricValues, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        newMetricValues: typing.Union[
+            MetaOapg.properties.newMetricValues,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'NewMetricValuesEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "NewMetricValuesEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.pyi b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.pyi
index 22d3b632..c4abf134 100644
--- a/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.pyi
+++ b/domino/_impl/custommetrics/model/new_metric_values_envelope_v1.pyi
@@ -1,104 +1,123 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class NewMetricValuesEnvelopeV1(
-    schemas.DictSchema
-):
+class NewMetricValuesEnvelopeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "newMetricValues",
         }
-        
+
         class properties:
-            
-            
-            class newMetricValues(
-                schemas.ListSchema
-            ):
-            
-            
+            class newMetricValues(schemas.ListSchema):
                 class MetaOapg:
-                    
                     @staticmethod
-                    def items() -> typing.Type['NewMetricValueV1']:
+                    def items() -> typing.Type["NewMetricValueV1"]:
                         return NewMetricValueV1
-            
+
                 def __new__(
                     cls,
-                    arg: typing.Union[typing.Tuple['NewMetricValueV1'], typing.List['NewMetricValueV1']],
+                    arg: typing.Union[
+                        typing.Tuple["NewMetricValueV1"],
+                        typing.List["NewMetricValueV1"],
+                    ],
                     _configuration: typing.Optional[schemas.Configuration] = None,
-                ) -> 'newMetricValues':
+                ) -> "newMetricValues":
                     return super().__new__(
                         cls,
                         arg,
                         _configuration=_configuration,
                     )
-            
-                def __getitem__(self, i: int) -> 'NewMetricValueV1':
+
+                def __getitem__(self, i: int) -> "NewMetricValueV1":
                     return super().__getitem__(i)
+
             __annotations__ = {
                 "newMetricValues": newMetricValues,
             }
-    
+
     newMetricValues: MetaOapg.properties.newMetricValues
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["newMetricValues"]
+    ) -> MetaOapg.properties.newMetricValues: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
+    def __getitem__(
+        self, name: typing.Union[typing_extensions.Literal["newMetricValues",], str]
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["newMetricValues"]) -> MetaOapg.properties.newMetricValues: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["newMetricValues"]
+    ) -> MetaOapg.properties.newMetricValues: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["newMetricValues", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing.Union[typing_extensions.Literal["newMetricValues",], str]
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        newMetricValues: typing.Union[MetaOapg.properties.newMetricValues, list, tuple, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        newMetricValues: typing.Union[
+            MetaOapg.properties.newMetricValues,
+            list,
+            tuple,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'NewMetricValuesEnvelopeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "NewMetricValuesEnvelopeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/target_range_v1.py b/domino/_impl/custommetrics/model/target_range_v1.py
index 25457ad4..4e4ef233 100644
--- a/domino/_impl/custommetrics/model/target_range_v1.py
+++ b/domino/_impl/custommetrics/model/target_range_v1.py
@@ -1,31 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
 
-class TargetRangeV1(
-    schemas.DictSchema
-):
+class TargetRangeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
@@ -39,10 +37,7 @@ class MetaOapg:
 
         class properties:
 
-            class condition(
-                schemas.EnumBase,
-                schemas.StrSchema
-            ):
+            class condition(schemas.EnumBase, schemas.StrSchema):
 
                 class MetaOapg:
                     enum_value_to_name = {
@@ -72,6 +67,7 @@ def GREATER_THAN_EQUAL(cls):
                 @schemas.classproperty
                 def BETWEEN(cls):
                     return cls("between")
+
             lowerLimit = schemas.NumberSchema
             upperLimit = schemas.NumberSchema
             __annotations__ = {
@@ -83,45 +79,104 @@ def BETWEEN(cls):
     condition: MetaOapg.properties.condition
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["condition"]
+    ) -> MetaOapg.properties.condition: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["lowerLimit"]) -> MetaOapg.properties.lowerLimit: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["lowerLimit"]
+    ) -> MetaOapg.properties.lowerLimit: ...
 
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["upperLimit"]) -> MetaOapg.properties.upperLimit: ...
+    def __getitem__(
+        self, name: typing_extensions.Literal["upperLimit"]
+    ) -> MetaOapg.properties.upperLimit: ...
 
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
 
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "condition",
+                "lowerLimit",
+                "upperLimit",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["condition"]
+    ) -> MetaOapg.properties.condition: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["lowerLimit"]) -> typing.Union[MetaOapg.properties.lowerLimit, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["lowerLimit"]
+    ) -> typing.Union[MetaOapg.properties.lowerLimit, schemas.Unset]: ...
 
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["upperLimit"]) -> typing.Union[MetaOapg.properties.upperLimit, schemas.Unset]: ...
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["upperLimit"]
+    ) -> typing.Union[MetaOapg.properties.upperLimit, schemas.Unset]: ...
 
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "condition",
+                "lowerLimit",
+                "upperLimit",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        condition: typing.Union[MetaOapg.properties.condition, str, ],
-        lowerLimit: typing.Union[MetaOapg.properties.lowerLimit, decimal.Decimal, int, float, schemas.Unset] = schemas.unset,
-        upperLimit: typing.Union[MetaOapg.properties.upperLimit, decimal.Decimal, int, float, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        condition: typing.Union[
+            MetaOapg.properties.condition,
+            str,
+        ],
+        lowerLimit: typing.Union[
+            MetaOapg.properties.lowerLimit, decimal.Decimal, int, float, schemas.Unset
+        ] = schemas.unset,
+        upperLimit: typing.Union[
+            MetaOapg.properties.upperLimit, decimal.Decimal, int, float, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'TargetRangeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "TargetRangeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/model/target_range_v1.pyi b/domino/_impl/custommetrics/model/target_range_v1.pyi
index cff156dc..8e1dff62 100644
--- a/domino/_impl/custommetrics/model/target_range_v1.pyi
+++ b/domino/_impl/custommetrics/model/target_range_v1.pyi
@@ -1,70 +1,61 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
-import typing_extensions  # noqa: F401
 import uuid  # noqa: F401
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions  # noqa: F401
 
 from domino._impl.custommetrics import schemas  # noqa: F401
 
-
-class TargetRangeV1(
-    schemas.DictSchema
-):
+class TargetRangeV1(schemas.DictSchema):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
     """
 
-
     class MetaOapg:
         required = {
             "condition",
         }
-        
+
         class properties:
-            
-            
-            class condition(
-                schemas.EnumBase,
-                schemas.StrSchema
-            ):
-                
+            class condition(schemas.EnumBase, schemas.StrSchema):
                 @schemas.classproperty
                 def LESS_THAN(cls):
                     return cls("lessThan")
-                
+
                 @schemas.classproperty
                 def LESS_THAN_EQUAL(cls):
                     return cls("lessThanEqual")
-                
+
                 @schemas.classproperty
                 def GREATER_THAN(cls):
                     return cls("greaterThan")
-                
+
                 @schemas.classproperty
                 def GREATER_THAN_EQUAL(cls):
                     return cls("greaterThanEqual")
-                
+
                 @schemas.classproperty
                 def BETWEEN(cls):
                     return cls("between")
+
             lowerLimit = schemas.NumberSchema
             upperLimit = schemas.NumberSchema
             __annotations__ = {
@@ -72,51 +63,100 @@ class TargetRangeV1(
                 "lowerLimit": lowerLimit,
                 "upperLimit": upperLimit,
             }
-    
+
     condition: MetaOapg.properties.condition
-    
+
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["condition"]
+    ) -> MetaOapg.properties.condition: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["lowerLimit"]) -> MetaOapg.properties.lowerLimit: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["lowerLimit"]
+    ) -> MetaOapg.properties.lowerLimit: ...
     @typing.overload
-    def __getitem__(self, name: typing_extensions.Literal["upperLimit"]) -> MetaOapg.properties.upperLimit: ...
-    
+    def __getitem__(
+        self, name: typing_extensions.Literal["upperLimit"]
+    ) -> MetaOapg.properties.upperLimit: ...
     @typing.overload
     def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
-    
-    def __getitem__(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
+    def __getitem__(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "condition",
+                "lowerLimit",
+                "upperLimit",
+            ],
+            str,
+        ],
+    ):
         # dict_instance[name] accessor
         return super().__getitem__(name)
-    
-    
+
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["condition"]) -> MetaOapg.properties.condition: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["condition"]
+    ) -> MetaOapg.properties.condition: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["lowerLimit"]) -> typing.Union[MetaOapg.properties.lowerLimit, schemas.Unset]: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["lowerLimit"]
+    ) -> typing.Union[MetaOapg.properties.lowerLimit, schemas.Unset]: ...
     @typing.overload
-    def get_item_oapg(self, name: typing_extensions.Literal["upperLimit"]) -> typing.Union[MetaOapg.properties.upperLimit, schemas.Unset]: ...
-    
+    def get_item_oapg(
+        self, name: typing_extensions.Literal["upperLimit"]
+    ) -> typing.Union[MetaOapg.properties.upperLimit, schemas.Unset]: ...
     @typing.overload
-    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
-    
-    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["condition", "lowerLimit", "upperLimit", ], str]):
+    def get_item_oapg(
+        self, name: str
+    ) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
+    def get_item_oapg(
+        self,
+        name: typing.Union[
+            typing_extensions.Literal[
+                "condition",
+                "lowerLimit",
+                "upperLimit",
+            ],
+            str,
+        ],
+    ):
         return super().get_item_oapg(name)
-    
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, ],
-        condition: typing.Union[MetaOapg.properties.condition, str, ],
-        lowerLimit: typing.Union[MetaOapg.properties.lowerLimit, decimal.Decimal, int, float, schemas.Unset] = schemas.unset,
-        upperLimit: typing.Union[MetaOapg.properties.upperLimit, decimal.Decimal, int, float, schemas.Unset] = schemas.unset,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+        ],
+        condition: typing.Union[
+            MetaOapg.properties.condition,
+            str,
+        ],
+        lowerLimit: typing.Union[
+            MetaOapg.properties.lowerLimit, decimal.Decimal, int, float, schemas.Unset
+        ] = schemas.unset,
+        upperLimit: typing.Union[
+            MetaOapg.properties.upperLimit, decimal.Decimal, int, float, schemas.Unset
+        ] = schemas.unset,
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'TargetRangeV1':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "TargetRangeV1":
         return super().__new__(
             cls,
             *args,
diff --git a/domino/_impl/custommetrics/models/__init__.py b/domino/_impl/custommetrics/models/__init__.py
index 10dc31ca..6ad42adf 100644
--- a/domino/_impl/custommetrics/models/__init__.py
+++ b/domino/_impl/custommetrics/models/__init__.py
@@ -12,12 +12,20 @@
 # sys.setrecursionlimit(n)
 
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
 from domino._impl.custommetrics.model.metadata_v1 import MetadataV1
-from domino._impl.custommetrics.model.metric_alert_request_v1 import MetricAlertRequestV1
+from domino._impl.custommetrics.model.metric_alert_request_v1 import (
+    MetricAlertRequestV1,
+)
 from domino._impl.custommetrics.model.metric_tag_v1 import MetricTagV1
 from domino._impl.custommetrics.model.metric_value_v1 import MetricValueV1
-from domino._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
+from domino._impl.custommetrics.model.metric_values_envelope_v1 import (
+    MetricValuesEnvelopeV1,
+)
 from domino._impl.custommetrics.model.new_metric_value_v1 import NewMetricValueV1
-from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import NewMetricValuesEnvelopeV1
+from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import (
+    NewMetricValuesEnvelopeV1,
+)
 from domino._impl.custommetrics.model.target_range_v1 import TargetRangeV1
diff --git a/domino/_impl/custommetrics/paths/__init__.py b/domino/_impl/custommetrics/paths/__init__.py
index fb4c2e99..ba3ad7d7 100644
--- a/domino/_impl/custommetrics/paths/__init__.py
+++ b/domino/_impl/custommetrics/paths/__init__.py
@@ -8,4 +8,6 @@
 class PathValues(str, enum.Enum):
     API_METRIC_ALERTS_V1 = "/api/metricAlerts/v1"
     API_METRIC_VALUES_V1 = "/api/metricValues/v1"
-    API_METRIC_VALUES_V1_MODEL_MONITORING_ID_METRIC = "/api/metricValues/v1/{modelMonitoringId}/{metric}"
+    API_METRIC_VALUES_V1_MODEL_MONITORING_ID_METRIC = (
+        "/api/metricValues/v1/{modelMonitoringId}/{metric}"
+    )
diff --git a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
index f820cafd..a68110c7 100644
--- a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
+++ b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.py
@@ -3,30 +3,32 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.metric_alert_request_v1 import MetricAlertRequestV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.metric_alert_request_v1 import (
+    MetricAlertRequestV1,
+)
 
 from . import path
 
@@ -36,8 +38,9 @@
 
 request_body_metric_alert_request_v1 = api_client.RequestBody(
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaForRequestBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaForRequestBodyApplicationJson
+        ),
     },
     required=True,
 )
@@ -78,10 +81,42 @@ def one_of(cls):
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -93,17 +128,16 @@ def __new__(
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -112,17 +146,16 @@ class ApiResponseFor400(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -131,17 +164,16 @@ class ApiResponseFor401(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -150,17 +182,16 @@ class ApiResponseFor403(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -169,30 +200,27 @@ class ApiResponseFor404(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
 _status_code_to_response = {
-    '200': _response_for_200,
-    '400': _response_for_400,
-    '401': _response_for_401,
-    '403': _response_for_403,
-    '404': _response_for_404,
-    '500': _response_for_500,
+    "200": _response_for_200,
+    "400": _response_for_400,
+    "401": _response_for_401,
+    "403": _response_for_403,
+    "404": _response_for_404,
+    "500": _response_for_500,
 }
-_all_accept_content_types = (
-    'application/json',
-)
+_all_accept_content_types = ("application/json",)
 
 
 class BaseApi(api_client.Api):
@@ -205,9 +233,7 @@ def _send_metric_alert_oapg(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def _send_metric_alert_oapg(
@@ -218,9 +244,7 @@ def _send_metric_alert_oapg(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def _send_metric_alert_oapg(
@@ -250,7 +274,7 @@ def _send_metric_alert_oapg(
     def _send_metric_alert_oapg(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -268,22 +292,25 @@ class instances
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         if body is schemas.unset:
             raise exceptions.ApiValueError(
-                'The required body parameter has an invalid value of: unset. Set a valid value instead')
+                "The required body parameter has an invalid value of: unset. Set a valid value instead"
+            )
         _fields = None
         _body = None
-        serialized_data = request_body_metric_alert_request_v1.serialize(body, content_type)
-        _headers.add('Content-Type', content_type)
-        if 'fields' in serialized_data:
-            _fields = serialized_data['fields']
-        elif 'body' in serialized_data:
-            _body = serialized_data['body']
+        serialized_data = request_body_metric_alert_request_v1.serialize(
+            body, content_type
+        )
+        _headers.add("Content-Type", content_type)
+        if "fields" in serialized_data:
+            _fields = serialized_data["fields"]
+        elif "body" in serialized_data:
+            _body = serialized_data["body"]
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='post'.upper(),
+            method="post".upper(),
             headers=_headers,
             fields=_fields,
             body=_body,
@@ -292,13 +319,19 @@ class instances
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
@@ -318,9 +351,7 @@ def send_metric_alert(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def send_metric_alert(
@@ -331,9 +362,7 @@ def send_metric_alert(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def send_metric_alert(
@@ -363,7 +392,7 @@ def send_metric_alert(
     def send_metric_alert(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -375,7 +404,7 @@ def send_metric_alert(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
 
@@ -391,9 +420,7 @@ def post(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def post(
@@ -404,9 +431,7 @@ def post(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def post(
@@ -436,7 +461,7 @@ def post(
     def post(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -448,5 +473,5 @@ def post(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
diff --git a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
index 7c889cbe..fcb410d8 100644
--- a/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_alerts_v1/post.pyi
@@ -3,63 +3,59 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.metric_alert_request_v1 import MetricAlertRequestV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.metric_alert_request_v1 import (
+    MetricAlertRequestV1,
+)
 
 # body param
 SchemaForRequestBodyApplicationJson = MetricAlertRequestV1
 
-
 request_body_metric_alert_request_v1 = api_client.RequestBody(
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaForRequestBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaForRequestBodyApplicationJson
+        ),
     },
     required=True,
 )
 
-
 @dataclass
 class ApiResponseFor200(api_client.ApiResponse):
     response: urllib3.HTTPResponse
     body: schemas.Unset = schemas.unset
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_200 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor200,
 )
 
-
 class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
-
-
     class MetaOapg:
-        
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -75,13 +71,44 @@ class SchemaFor400ResponseBodyApplicationJson(
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -89,103 +116,85 @@ class SchemaFor400ResponseBodyApplicationJson(
             **kwargs,
         )
 
-
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
-_all_accept_content_types = (
-    'application/json',
-)
-
+_all_accept_content_types = ("application/json",)
 
 class BaseApi(api_client.Api):
     @typing.overload
@@ -197,10 +206,7 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def _send_metric_alert_oapg(
         self,
@@ -210,11 +216,7 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def _send_metric_alert_oapg(
         self,
@@ -225,7 +227,6 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def _send_metric_alert_oapg(
         self,
@@ -239,11 +240,10 @@ class BaseApi(api_client.Api):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def _send_metric_alert_oapg(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -261,22 +261,25 @@ class BaseApi(api_client.Api):
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         if body is schemas.unset:
             raise exceptions.ApiValueError(
-                'The required body parameter has an invalid value of: unset. Set a valid value instead')
+                "The required body parameter has an invalid value of: unset. Set a valid value instead"
+            )
         _fields = None
         _body = None
-        serialized_data = request_body_metric_alert_request_v1.serialize(body, content_type)
-        _headers.add('Content-Type', content_type)
-        if 'fields' in serialized_data:
-            _fields = serialized_data['fields']
-        elif 'body' in serialized_data:
-            _body = serialized_data['body']
+        serialized_data = request_body_metric_alert_request_v1.serialize(
+            body, content_type
+        )
+        _headers.add("Content-Type", content_type)
+        if "fields" in serialized_data:
+            _fields = serialized_data["fields"]
+        elif "body" in serialized_data:
+            _body = serialized_data["body"]
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='post'.upper(),
+            method="post".upper(),
             headers=_headers,
             fields=_fields,
             body=_body,
@@ -285,20 +288,25 @@ class BaseApi(api_client.Api):
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
 
         return api_response
 
-
 class SendMetricAlert(BaseApi):
     # this class is used by api classes that refer to endpoints with operationId fn names
 
@@ -311,10 +319,7 @@ class SendMetricAlert(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def send_metric_alert(
         self,
@@ -324,11 +329,7 @@ class SendMetricAlert(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def send_metric_alert(
         self,
@@ -339,7 +340,6 @@ class SendMetricAlert(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def send_metric_alert(
         self,
@@ -353,11 +353,10 @@ class SendMetricAlert(BaseApi):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def send_metric_alert(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -369,10 +368,9 @@ class SendMetricAlert(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
-
 class ApiForpost(BaseApi):
     # this class is used by api classes that refer to endpoints by path and http method names
 
@@ -385,10 +383,7 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def post(
         self,
@@ -398,11 +393,7 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def post(
         self,
@@ -413,7 +404,6 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def post(
         self,
@@ -427,11 +417,10 @@ class ApiForpost(BaseApi):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def post(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -443,7 +432,5 @@ class ApiForpost(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
-
-
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
index 01d4ffe0..cde6fc6f 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.py
@@ -3,30 +3,32 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import NewMetricValuesEnvelopeV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import (
+    NewMetricValuesEnvelopeV1,
+)
 
 from . import path
 
@@ -36,8 +38,9 @@
 
 request_body_new_metric_values_envelope_v1 = api_client.RequestBody(
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaForRequestBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaForRequestBodyApplicationJson
+        ),
     },
     required=True,
 )
@@ -78,10 +81,42 @@ def one_of(cls):
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -93,17 +128,16 @@ def __new__(
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -112,17 +146,16 @@ class ApiResponseFor400(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -131,17 +164,16 @@ class ApiResponseFor401(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -150,17 +182,16 @@ class ApiResponseFor403(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -169,30 +200,27 @@ class ApiResponseFor404(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
 _status_code_to_response = {
-    '201': _response_for_201,
-    '400': _response_for_400,
-    '401': _response_for_401,
-    '403': _response_for_403,
-    '404': _response_for_404,
-    '500': _response_for_500,
+    "201": _response_for_201,
+    "400": _response_for_400,
+    "401": _response_for_401,
+    "403": _response_for_403,
+    "404": _response_for_404,
+    "500": _response_for_500,
 }
-_all_accept_content_types = (
-    'application/json',
-)
+_all_accept_content_types = ("application/json",)
 
 
 class BaseApi(api_client.Api):
@@ -205,9 +233,7 @@ def _log_metric_values_oapg(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def _log_metric_values_oapg(
@@ -218,9 +244,7 @@ def _log_metric_values_oapg(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def _log_metric_values_oapg(
@@ -250,7 +274,7 @@ def _log_metric_values_oapg(
     def _log_metric_values_oapg(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -268,22 +292,25 @@ class instances
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         if body is schemas.unset:
             raise exceptions.ApiValueError(
-                'The required body parameter has an invalid value of: unset. Set a valid value instead')
+                "The required body parameter has an invalid value of: unset. Set a valid value instead"
+            )
         _fields = None
         _body = None
-        serialized_data = request_body_new_metric_values_envelope_v1.serialize(body, content_type)
-        _headers.add('Content-Type', content_type)
-        if 'fields' in serialized_data:
-            _fields = serialized_data['fields']
-        elif 'body' in serialized_data:
-            _body = serialized_data['body']
+        serialized_data = request_body_new_metric_values_envelope_v1.serialize(
+            body, content_type
+        )
+        _headers.add("Content-Type", content_type)
+        if "fields" in serialized_data:
+            _fields = serialized_data["fields"]
+        elif "body" in serialized_data:
+            _body = serialized_data["body"]
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='post'.upper(),
+            method="post".upper(),
             headers=_headers,
             fields=_fields,
             body=_body,
@@ -292,13 +319,19 @@ class instances
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
@@ -318,9 +351,7 @@ def log_metric_values(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def log_metric_values(
@@ -331,9 +362,7 @@ def log_metric_values(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def log_metric_values(
@@ -363,7 +392,7 @@ def log_metric_values(
     def log_metric_values(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -375,7 +404,7 @@ def log_metric_values(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
 
@@ -391,9 +420,7 @@ def post(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def post(
@@ -404,9 +431,7 @@ def post(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor201,]: ...
 
     @typing.overload
     def post(
@@ -436,7 +461,7 @@ def post(
     def post(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -448,5 +473,5 @@ def post(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
index ab5ede54..2b62d2d8 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1/post.pyi
@@ -3,63 +3,59 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import NewMetricValuesEnvelopeV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.new_metric_values_envelope_v1 import (
+    NewMetricValuesEnvelopeV1,
+)
 
 # body param
 SchemaForRequestBodyApplicationJson = NewMetricValuesEnvelopeV1
 
-
 request_body_new_metric_values_envelope_v1 = api_client.RequestBody(
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaForRequestBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaForRequestBodyApplicationJson
+        ),
     },
     required=True,
 )
 
-
 @dataclass
 class ApiResponseFor201(api_client.ApiResponse):
     response: urllib3.HTTPResponse
     body: schemas.Unset = schemas.unset
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_201 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor201,
 )
 
-
 class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
-
-
     class MetaOapg:
-        
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -75,13 +71,44 @@ class SchemaFor400ResponseBodyApplicationJson(
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -89,103 +116,85 @@ class SchemaFor400ResponseBodyApplicationJson(
             **kwargs,
         )
 
-
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
-_all_accept_content_types = (
-    'application/json',
-)
-
+_all_accept_content_types = ("application/json",)
 
 class BaseApi(api_client.Api):
     @typing.overload
@@ -197,10 +206,7 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def _log_metric_values_oapg(
         self,
@@ -210,11 +216,7 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def _log_metric_values_oapg(
         self,
@@ -225,7 +227,6 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def _log_metric_values_oapg(
         self,
@@ -239,11 +240,10 @@ class BaseApi(api_client.Api):
         ApiResponseFor201,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def _log_metric_values_oapg(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -261,22 +261,25 @@ class BaseApi(api_client.Api):
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         if body is schemas.unset:
             raise exceptions.ApiValueError(
-                'The required body parameter has an invalid value of: unset. Set a valid value instead')
+                "The required body parameter has an invalid value of: unset. Set a valid value instead"
+            )
         _fields = None
         _body = None
-        serialized_data = request_body_new_metric_values_envelope_v1.serialize(body, content_type)
-        _headers.add('Content-Type', content_type)
-        if 'fields' in serialized_data:
-            _fields = serialized_data['fields']
-        elif 'body' in serialized_data:
-            _body = serialized_data['body']
+        serialized_data = request_body_new_metric_values_envelope_v1.serialize(
+            body, content_type
+        )
+        _headers.add("Content-Type", content_type)
+        if "fields" in serialized_data:
+            _fields = serialized_data["fields"]
+        elif "body" in serialized_data:
+            _body = serialized_data["body"]
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='post'.upper(),
+            method="post".upper(),
             headers=_headers,
             fields=_fields,
             body=_body,
@@ -285,20 +288,25 @@ class BaseApi(api_client.Api):
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
 
         return api_response
 
-
 class LogMetricValues(BaseApi):
     # this class is used by api classes that refer to endpoints with operationId fn names
 
@@ -311,10 +319,7 @@ class LogMetricValues(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def log_metric_values(
         self,
@@ -324,11 +329,7 @@ class LogMetricValues(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def log_metric_values(
         self,
@@ -339,7 +340,6 @@ class LogMetricValues(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def log_metric_values(
         self,
@@ -353,11 +353,10 @@ class LogMetricValues(BaseApi):
         ApiResponseFor201,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def log_metric_values(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -369,10 +368,9 @@ class LogMetricValues(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
-
 class ApiForpost(BaseApi):
     # this class is used by api classes that refer to endpoints by path and http method names
 
@@ -385,10 +383,7 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def post(
         self,
@@ -398,11 +393,7 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor201,
-    ]: ...
-
-
+    ) -> typing.Union[ApiResponseFor201,]: ...
     @typing.overload
     def post(
         self,
@@ -413,7 +404,6 @@ class ApiForpost(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def post(
         self,
@@ -427,11 +417,10 @@ class ApiForpost(BaseApi):
         ApiResponseFor201,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def post(
         self,
         body: typing.Union[SchemaForRequestBodyApplicationJson,],
-        content_type: str = 'application/json',
+        content_type: str = "application/json",
         accept_content_types: typing.Tuple[str] = _all_accept_content_types,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -443,7 +432,5 @@ class ApiForpost(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
-
-
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
index ff21b1cc..b6bc6cdb 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.py
@@ -3,30 +3,32 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
-from domino._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.metric_values_envelope_v1 import (
+    MetricValuesEnvelopeV1,
+)
 
 from . import path
 
@@ -34,17 +36,20 @@
 StartingReferenceTimestampInclusiveSchema = schemas.StrSchema
 EndingReferenceTimestampInclusiveSchema = schemas.StrSchema
 RequestRequiredQueryParams = typing_extensions.TypedDict(
-    'RequestRequiredQueryParams',
+    "RequestRequiredQueryParams",
     {
-        'startingReferenceTimestampInclusive': typing.Union[StartingReferenceTimestampInclusiveSchema, str, ],
-        'endingReferenceTimestampInclusive': typing.Union[EndingReferenceTimestampInclusiveSchema, str, ],
-    }
+        "startingReferenceTimestampInclusive": typing.Union[
+            StartingReferenceTimestampInclusiveSchema,
+            str,
+        ],
+        "endingReferenceTimestampInclusive": typing.Union[
+            EndingReferenceTimestampInclusiveSchema,
+            str,
+        ],
+    },
 )
 RequestOptionalQueryParams = typing_extensions.TypedDict(
-    'RequestOptionalQueryParams',
-    {
-    },
-    total=False
+    "RequestOptionalQueryParams", {}, total=False
 )
 
 
@@ -70,17 +75,20 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams)
 ModelMonitoringIdSchema = schemas.StrSchema
 MetricSchema = schemas.StrSchema
 RequestRequiredPathParams = typing_extensions.TypedDict(
-    'RequestRequiredPathParams',
+    "RequestRequiredPathParams",
     {
-        'modelMonitoringId': typing.Union[ModelMonitoringIdSchema, str, ],
-        'metric': typing.Union[MetricSchema, str, ],
-    }
+        "modelMonitoringId": typing.Union[
+            ModelMonitoringIdSchema,
+            str,
+        ],
+        "metric": typing.Union[
+            MetricSchema,
+            str,
+        ],
+    },
 )
 RequestOptionalPathParams = typing_extensions.TypedDict(
-    'RequestOptionalPathParams',
-    {
-    },
-    total=False
+    "RequestOptionalPathParams", {}, total=False
 )
 
 
@@ -106,17 +114,16 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
 @dataclass
 class ApiResponseFor200(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor200ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor200ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_200 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor200,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor200ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor200ResponseBodyApplicationJson
+        ),
     },
 )
 
@@ -144,10 +151,42 @@ def one_of(cls):
 
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -159,17 +198,16 @@ def __new__(
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -178,17 +216,16 @@ class ApiResponseFor400(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -197,17 +234,16 @@ class ApiResponseFor401(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -216,17 +252,16 @@ class ApiResponseFor403(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
@@ -235,30 +270,27 @@ class ApiResponseFor404(api_client.ApiResponse):
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
 
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
 _status_code_to_response = {
-    '200': _response_for_200,
-    '400': _response_for_400,
-    '401': _response_for_401,
-    '403': _response_for_403,
-    '404': _response_for_404,
-    '500': _response_for_500,
+    "200": _response_for_200,
+    "400": _response_for_400,
+    "401": _response_for_401,
+    "403": _response_for_403,
+    "404": _response_for_404,
+    "500": _response_for_500,
 }
-_all_accept_content_types = (
-    'application/json',
-)
+_all_accept_content_types = ("application/json",)
 
 
 class BaseApi(api_client.Api):
@@ -271,9 +303,7 @@ def _retrieve_metric_values_oapg(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def _retrieve_metric_values_oapg(
@@ -330,7 +360,7 @@ class instances
             _path_params.update(serialized_data)
 
         for k, v in _path_params.items():
-            used_path = used_path.replace('{%s}' % k, v)
+            used_path = used_path.replace("{%s}" % k, v)
 
         prefix_separator_iterator = None
         for parameter in (
@@ -342,7 +372,9 @@ class instances
                 continue
             if prefix_separator_iterator is None:
                 prefix_separator_iterator = parameter.get_prefix_separator_iterator()
-            serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
+            serialized_data = parameter.serialize(
+                parameter_data, prefix_separator_iterator
+            )
             for serialized_value in serialized_data.values():
                 used_path += serialized_value
 
@@ -350,24 +382,30 @@ class instances
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='get'.upper(),
+            method="get".upper(),
             headers=_headers,
             stream=stream,
             timeout=timeout,
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
@@ -387,9 +425,7 @@ def retrieve_metric_values(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def retrieve_metric_values(
@@ -430,7 +466,7 @@ def retrieve_metric_values(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
 
@@ -446,9 +482,7 @@ def get(
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
+    ) -> typing.Union[ApiResponseFor200,]: ...
 
     @typing.overload
     def get(
@@ -489,5 +523,5 @@ def get(
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
diff --git a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
index f191e511..2fb71a9a 100644
--- a/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
+++ b/domino/_impl/custommetrics/paths/api_metric_values_v1_model_monitoring_id_metric/get.pyi
@@ -3,53 +3,56 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
-from dataclasses import dataclass
-import typing_extensions
-import urllib3
-from urllib3._collections import HTTPHeaderDict
-
-from domino._impl.custommetrics import api_client, exceptions
-from datetime import date, datetime  # noqa: F401
 import decimal  # noqa: F401
 import functools  # noqa: F401
 import io  # noqa: F401
 import re  # noqa: F401
 import typing  # noqa: F401
 import uuid  # noqa: F401
+from dataclasses import dataclass
+from datetime import date, datetime  # noqa: F401
 
 import frozendict  # noqa: F401
+import typing_extensions
+import urllib3
+from urllib3._collections import HTTPHeaderDict
 
 from domino._impl.custommetrics import schemas  # noqa: F401
-
-from domino._impl.custommetrics.model.metric_values_envelope_v1 import MetricValuesEnvelopeV1
+from domino._impl.custommetrics import api_client, exceptions
 from domino._impl.custommetrics.model.failure_envelope_v1 import FailureEnvelopeV1
-from domino._impl.custommetrics.model.invalid_body_envelope_v1 import InvalidBodyEnvelopeV1
+from domino._impl.custommetrics.model.invalid_body_envelope_v1 import (
+    InvalidBodyEnvelopeV1,
+)
+from domino._impl.custommetrics.model.metric_values_envelope_v1 import (
+    MetricValuesEnvelopeV1,
+)
 
 # Query params
 StartingReferenceTimestampInclusiveSchema = schemas.StrSchema
 EndingReferenceTimestampInclusiveSchema = schemas.StrSchema
 RequestRequiredQueryParams = typing_extensions.TypedDict(
-    'RequestRequiredQueryParams',
+    "RequestRequiredQueryParams",
     {
-        'startingReferenceTimestampInclusive': typing.Union[StartingReferenceTimestampInclusiveSchema, str, ],
-        'endingReferenceTimestampInclusive': typing.Union[EndingReferenceTimestampInclusiveSchema, str, ],
-    }
+        "startingReferenceTimestampInclusive": typing.Union[
+            StartingReferenceTimestampInclusiveSchema,
+            str,
+        ],
+        "endingReferenceTimestampInclusive": typing.Union[
+            EndingReferenceTimestampInclusiveSchema,
+            str,
+        ],
+    },
 )
 RequestOptionalQueryParams = typing_extensions.TypedDict(
-    'RequestOptionalQueryParams',
-    {
-    },
-    total=False
+    "RequestOptionalQueryParams", {}, total=False
 )
 
-
 class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
     pass
 
-
 request_query_starting_reference_timestamp_inclusive = api_client.QueryParameter(
     name="startingReferenceTimestampInclusive",
     style=api_client.ParameterStyle.FORM,
@@ -68,24 +71,25 @@ request_query_ending_reference_timestamp_inclusive = api_client.QueryParameter(
 ModelMonitoringIdSchema = schemas.StrSchema
 MetricSchema = schemas.StrSchema
 RequestRequiredPathParams = typing_extensions.TypedDict(
-    'RequestRequiredPathParams',
+    "RequestRequiredPathParams",
     {
-        'modelMonitoringId': typing.Union[ModelMonitoringIdSchema, str, ],
-        'metric': typing.Union[MetricSchema, str, ],
-    }
+        "modelMonitoringId": typing.Union[
+            ModelMonitoringIdSchema,
+            str,
+        ],
+        "metric": typing.Union[
+            MetricSchema,
+            str,
+        ],
+    },
 )
 RequestOptionalPathParams = typing_extensions.TypedDict(
-    'RequestOptionalPathParams',
-    {
-    },
-    total=False
+    "RequestOptionalPathParams", {}, total=False
 )
 
-
 class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
     pass
 
-
 request_path_model_monitoring_id = api_client.PathParameter(
     name="modelMonitoringId",
     style=api_client.ParameterStyle.SIMPLE,
@@ -100,32 +104,25 @@ request_path_metric = api_client.PathParameter(
 )
 SchemaFor200ResponseBodyApplicationJson = MetricValuesEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor200(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor200ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor200ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_200 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor200,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor200ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor200ResponseBodyApplicationJson
+        ),
     },
 )
 
-
 class SchemaFor400ResponseBodyApplicationJson(
     schemas.ComposedSchema,
 ):
-
-
     class MetaOapg:
-        
         @classmethod
         @functools.lru_cache()
         def one_of(cls):
@@ -141,13 +138,44 @@ class SchemaFor400ResponseBodyApplicationJson(
                 InvalidBodyEnvelopeV1,
             ]
 
-
     def __new__(
         cls,
-        *args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            bool,
+            None,
+            list,
+            tuple,
+            bytes,
+            io.FileIO,
+            io.BufferedReader,
+        ],
         _configuration: typing.Optional[schemas.Configuration] = None,
-        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
-    ) -> 'SchemaFor400ResponseBodyApplicationJson':
+        **kwargs: typing.Union[
+            schemas.AnyTypeSchema,
+            dict,
+            frozendict.frozendict,
+            str,
+            date,
+            datetime,
+            uuid.UUID,
+            int,
+            float,
+            decimal.Decimal,
+            None,
+            list,
+            tuple,
+            bytes,
+        ],
+    ) -> "SchemaFor400ResponseBodyApplicationJson":
         return super().__new__(
             cls,
             *args,
@@ -155,103 +183,85 @@ class SchemaFor400ResponseBodyApplicationJson(
             **kwargs,
         )
 
-
 @dataclass
 class ApiResponseFor400(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor400ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor400ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_400 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor400,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor400ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor400ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor401ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor401(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor401ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor401ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_401 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor401,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor401ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor401ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor403ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor403(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor403ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor403ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_403 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor403,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor403ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor403ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor404ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor404(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor404ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor404ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_404 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor404,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor404ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor404ResponseBodyApplicationJson
+        ),
     },
 )
 SchemaFor500ResponseBodyApplicationJson = FailureEnvelopeV1
 
-
 @dataclass
 class ApiResponseFor500(api_client.ApiResponse):
     response: urllib3.HTTPResponse
-    body: typing.Union[
-        SchemaFor500ResponseBodyApplicationJson,
-    ]
+    body: typing.Union[SchemaFor500ResponseBodyApplicationJson,]
     headers: schemas.Unset = schemas.unset
 
-
 _response_for_500 = api_client.OpenApiResponse(
     response_cls=ApiResponseFor500,
     content={
-        'application/json': api_client.MediaType(
-            schema=SchemaFor500ResponseBodyApplicationJson),
+        "application/json": api_client.MediaType(
+            schema=SchemaFor500ResponseBodyApplicationJson
+        ),
     },
 )
-_all_accept_content_types = (
-    'application/json',
-)
-
+_all_accept_content_types = ("application/json",)
 
 class BaseApi(api_client.Api):
     @typing.overload
@@ -263,10 +273,7 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def _retrieve_metric_values_oapg(
         self,
@@ -276,7 +283,6 @@ class BaseApi(api_client.Api):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def _retrieve_metric_values_oapg(
         self,
@@ -290,7 +296,6 @@ class BaseApi(api_client.Api):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def _retrieve_metric_values_oapg(
         self,
         query_params: RequestQueryParams = frozendict.frozendict(),
@@ -322,7 +327,7 @@ class BaseApi(api_client.Api):
             _path_params.update(serialized_data)
 
         for k, v in _path_params.items():
-            used_path = used_path.replace('{%s}' % k, v)
+            used_path = used_path.replace("{%s}" % k, v)
 
         prefix_separator_iterator = None
         for parameter in (
@@ -334,7 +339,9 @@ class BaseApi(api_client.Api):
                 continue
             if prefix_separator_iterator is None:
                 prefix_separator_iterator = parameter.get_prefix_separator_iterator()
-            serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
+            serialized_data = parameter.serialize(
+                parameter_data, prefix_separator_iterator
+            )
             for serialized_value in serialized_data.values():
                 used_path += serialized_value
 
@@ -342,31 +349,36 @@ class BaseApi(api_client.Api):
         # TODO add cookie handling
         if accept_content_types:
             for accept_content_type in accept_content_types:
-                _headers.add('Accept', accept_content_type)
+                _headers.add("Accept", accept_content_type)
 
         response = self.api_client.call_api(
             resource_path=used_path,
-            method='get'.upper(),
+            method="get".upper(),
             headers=_headers,
             stream=stream,
             timeout=timeout,
         )
 
         if skip_deserialization:
-            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+            api_response = api_client.ApiResponseWithoutDeserialization(
+                response=response
+            )
         else:
             response_for_status = _status_code_to_response.get(str(response.status))
             if response_for_status:
-                api_response = response_for_status.deserialize(response, self.api_client.configuration)
+                api_response = response_for_status.deserialize(
+                    response, self.api_client.configuration
+                )
             else:
-                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
+                api_response = api_client.ApiResponseWithoutDeserialization(
+                    response=response
+                )
 
         if not 200 <= response.status <= 299:
             raise exceptions.ApiException(api_response=api_response)
 
         return api_response
 
-
 class RetrieveMetricValues(BaseApi):
     # this class is used by api classes that refer to endpoints with operationId fn names
 
@@ -379,10 +391,7 @@ class RetrieveMetricValues(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def retrieve_metric_values(
         self,
@@ -392,7 +401,6 @@ class RetrieveMetricValues(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def retrieve_metric_values(
         self,
@@ -406,7 +414,6 @@ class RetrieveMetricValues(BaseApi):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def retrieve_metric_values(
         self,
         query_params: RequestQueryParams = frozendict.frozendict(),
@@ -422,10 +429,9 @@ class RetrieveMetricValues(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
 
-
 class ApiForget(BaseApi):
     # this class is used by api classes that refer to endpoints by path and http method names
 
@@ -438,10 +444,7 @@ class ApiForget(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
         skip_deserialization: typing_extensions.Literal[False] = ...,
-    ) -> typing.Union[
-        ApiResponseFor200,
-    ]: ...
-
+    ) -> typing.Union[ApiResponseFor200,]: ...
     @typing.overload
     def get(
         self,
@@ -451,7 +454,6 @@ class ApiForget(BaseApi):
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
     ) -> api_client.ApiResponseWithoutDeserialization: ...
-
     @typing.overload
     def get(
         self,
@@ -465,7 +467,6 @@ class ApiForget(BaseApi):
         ApiResponseFor200,
         api_client.ApiResponseWithoutDeserialization,
     ]: ...
-
     def get(
         self,
         query_params: RequestQueryParams = frozendict.frozendict(),
@@ -481,7 +482,5 @@ class ApiForget(BaseApi):
             accept_content_types=accept_content_types,
             stream=stream,
             timeout=timeout,
-            skip_deserialization=skip_deserialization
+            skip_deserialization=skip_deserialization,
         )
-
-
diff --git a/domino/_impl/custommetrics/rest.py b/domino/_impl/custommetrics/rest.py
index 0a711e2b..3fc70a72 100644
--- a/domino/_impl/custommetrics/rest.py
+++ b/domino/_impl/custommetrics/rest.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import logging
@@ -19,7 +19,6 @@
 
 from domino._impl.custommetrics.exceptions import ApiException, ApiValueError
 
-
 logger = logging.getLogger(__name__)
 
 
@@ -47,13 +46,15 @@ def __init__(self, configuration, pools_size=4, maxsize=None):
 
         addition_pool_args = {}
         if configuration.assert_hostname is not None:
-            addition_pool_args['assert_hostname'] = configuration.assert_hostname  # noqa: E501
+            addition_pool_args["assert_hostname"] = (
+                configuration.assert_hostname
+            )  # noqa: E501
 
         if configuration.retries is not None:
-            addition_pool_args['retries'] = configuration.retries
+            addition_pool_args["retries"] = configuration.retries
 
         if configuration.socket_options is not None:
-            addition_pool_args['socket_options'] = configuration.socket_options
+            addition_pool_args["socket_options"] = configuration.socket_options
 
         if maxsize is None:
             if configuration.connection_pool_maxsize is not None:
@@ -72,7 +73,7 @@ def __init__(self, configuration, pools_size=4, maxsize=None):
                 key_file=configuration.key_file,
                 proxy_url=configuration.proxy,
                 proxy_headers=configuration.proxy_headers,
-                **addition_pool_args
+                **addition_pool_args,
             )
         else:
             self.pool_manager = urllib3.PoolManager(
@@ -82,7 +83,7 @@ def __init__(self, configuration, pools_size=4, maxsize=None):
                 ca_certs=ca_certs,
                 cert_file=configuration.cert_file,
                 key_file=configuration.key_file,
-                **addition_pool_args
+                **addition_pool_args,
             )
 
     def request(
@@ -90,7 +91,9 @@ def request(
         method: str,
         url: str,
         headers: typing.Optional[HTTPHeaderDict] = None,
-        fields: typing.Optional[typing.Tuple[typing.Tuple[str, typing.Any], ...]] = None,
+        fields: typing.Optional[
+            typing.Tuple[typing.Tuple[str, typing.Any], ...]
+        ] = None,
         body: typing.Optional[typing.Union[str, bytes]] = None,
         stream: bool = False,
         timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
@@ -113,13 +116,10 @@ def request(
                                 (connection, read) timeouts.
         """
         method = method.upper()
-        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
-                          'PATCH', 'OPTIONS']
+        assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"]
 
         if fields and body:
-            raise ApiValueError(
-                "body parameter cannot be used with fields parameter."
-            )
+            raise ApiValueError("body parameter cannot be used with fields parameter.")
 
         fields = fields or {}
         headers = headers or {}
@@ -127,52 +127,59 @@ def request(
         if timeout:
             if isinstance(timeout, (int, float)):  # noqa: E501,F821
                 timeout = urllib3.Timeout(total=timeout)
-            elif (isinstance(timeout, tuple) and
-                  len(timeout) == 2):
+            elif isinstance(timeout, tuple) and len(timeout) == 2:
                 timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1])
 
         try:
             # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
-            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
-                if 'Content-Type' not in headers and body is None:
+            if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]:
+                if "Content-Type" not in headers and body is None:
                     r = self.pool_manager.request(
                         method,
                         url,
                         preload_content=not stream,
                         timeout=timeout,
-                        headers=headers
+                        headers=headers,
                     )
-                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
+                elif (
+                    headers["Content-Type"] == "application/x-www-form-urlencoded"
+                ):  # noqa: E501
                     r = self.pool_manager.request(
-                        method, url,
+                        method,
+                        url,
                         fields=fields,
                         encode_multipart=False,
                         preload_content=not stream,
                         timeout=timeout,
-                        headers=headers)
-                elif headers['Content-Type'] == 'multipart/form-data':
+                        headers=headers,
+                    )
+                elif headers["Content-Type"] == "multipart/form-data":
                     # must del headers['Content-Type'], or the correct
                     # Content-Type which generated by urllib3 will be
                     # overwritten.
-                    del headers['Content-Type']
+                    del headers["Content-Type"]
                     r = self.pool_manager.request(
-                        method, url,
+                        method,
+                        url,
                         fields=fields,
                         encode_multipart=True,
                         preload_content=not stream,
                         timeout=timeout,
-                        headers=headers)
+                        headers=headers,
+                    )
                 # Pass a `string` parameter directly in the body to support
                 # other content types than Json when `body` argument is
                 # provided in serialized form
                 elif isinstance(body, str) or isinstance(body, bytes):
                     request_body = body
                     r = self.pool_manager.request(
-                        method, url,
+                        method,
+                        url,
                         body=request_body,
                         preload_content=not stream,
                         timeout=timeout,
-                        headers=headers)
+                        headers=headers,
+                    )
                 else:
                     # Cannot generate the request from given parameters
                     msg = """Cannot prepare a request message for provided
@@ -181,10 +188,13 @@ def request(
                     raise ApiException(status=0, reason=msg)
             # For `GET`, `HEAD`
             else:
-                r = self.pool_manager.request(method, url,
-                                              preload_content=not stream,
-                                              timeout=timeout,
-                                              headers=headers)
+                r = self.pool_manager.request(
+                    method,
+                    url,
+                    preload_content=not stream,
+                    timeout=timeout,
+                    headers=headers,
+                )
         except urllib3.exceptions.SSLError as e:
             msg = "{0}\n{1}".format(type(e).__name__, str(e))
             raise ApiException(status=0, reason=msg)
@@ -195,58 +205,81 @@ def request(
 
         return r
 
-    def GET(self, url, headers=None, stream=False,
-            timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("GET", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            fields=fields)
-
-    def HEAD(self, url, headers=None, stream=False,
-             timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("HEAD", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            fields=fields)
-
-    def OPTIONS(self, url, headers=None,
-                body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("OPTIONS", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            body=body, fields=fields)
-
-    def DELETE(self, url, headers=None, body=None,
-               stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("DELETE", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            body=body, fields=fields)
-
-    def POST(self, url, headers=None,
-             body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("POST", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            body=body, fields=fields)
-
-    def PUT(self, url, headers=None,
-            body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("PUT", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            body=body, fields=fields)
-
-    def PATCH(self, url, headers=None,
-              body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
-        return self.request("PATCH", url,
-                            headers=headers,
-                            stream=stream,
-                            timeout=timeout,
-                            body=body, fields=fields)
+    def GET(
+        self, url, headers=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "GET", url, headers=headers, stream=stream, timeout=timeout, fields=fields
+        )
+
+    def HEAD(
+        self, url, headers=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "HEAD", url, headers=headers, stream=stream, timeout=timeout, fields=fields
+        )
+
+    def OPTIONS(
+        self, url, headers=None, body=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "OPTIONS",
+            url,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+            body=body,
+            fields=fields,
+        )
+
+    def DELETE(
+        self, url, headers=None, body=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "DELETE",
+            url,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+            body=body,
+            fields=fields,
+        )
+
+    def POST(
+        self, url, headers=None, body=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "POST",
+            url,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+            body=body,
+            fields=fields,
+        )
+
+    def PUT(
+        self, url, headers=None, body=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "PUT",
+            url,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+            body=body,
+            fields=fields,
+        )
+
+    def PATCH(
+        self, url, headers=None, body=None, stream=False, timeout=None, fields=None
+    ) -> urllib3.HTTPResponse:
+        return self.request(
+            "PATCH",
+            url,
+            headers=headers,
+            stream=stream,
+            timeout=timeout,
+            body=body,
+            fields=fields,
+        )
diff --git a/domino/_impl/custommetrics/schemas.py b/domino/_impl/custommetrics/schemas.py
index a59c4929..42c34188 100644
--- a/domino/_impl/custommetrics/schemas.py
+++ b/domino/_impl/custommetrics/schemas.py
@@ -1,34 +1,29 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
-from collections import defaultdict
-from datetime import date, datetime, timedelta  # noqa: F401
-import functools
 import decimal
+import functools
 import io
 import re
 import types
 import typing
 import uuid
+from collections import defaultdict
+from datetime import date, datetime, timedelta  # noqa: F401
 
-from dateutil.parser.isoparser import isoparser, _takes_ascii
 import frozendict
+from dateutil.parser.isoparser import _takes_ascii, isoparser
 
-from domino._impl.custommetrics.exceptions import (
-    ApiTypeError,
-    ApiValueError,
-)
-from domino._impl.custommetrics.configuration import (
-    Configuration,
-)
+from domino._impl.custommetrics.configuration import Configuration
+from domino._impl.custommetrics.exceptions import ApiTypeError, ApiValueError
 
 
 class Unset(object):
@@ -36,6 +31,7 @@ class Unset(object):
     An instance of this class is set as the default value for object type(dict) properties that are optional
     When a property has an unset value, that property will not be assigned in the dict
     """
+
     pass
 
 
@@ -54,12 +50,14 @@ class FileIO(io.FileIO):
     def __new__(cls, arg: typing.Union[io.FileIO, io.BufferedReader]):
         if isinstance(arg, (io.FileIO, io.BufferedReader)):
             if arg.closed:
-                raise ApiValueError('Invalid file state; file is closed and must be open')
+                raise ApiValueError(
+                    "Invalid file state; file is closed and must be open"
+                )
             arg.close()
             inst = super(FileIO, cls).__new__(cls, arg.name)
             super(FileIO, inst).__init__(arg.name)
             return inst
-        raise ApiValueError('FileIO must be passed arg which contains the open file')
+        raise ApiValueError("FileIO must be passed arg which contains the open file")
 
     def __init__(self, arg: typing.Union[io.FileIO, io.BufferedReader]):
         pass
@@ -83,13 +81,16 @@ class ValidationMetadata(frozendict.frozendict):
     """
     A class storing metadata that is needed to validate OpenApi Schema payloads
     """
+
     def __new__(
         cls,
-        path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(['args[0]']),
+        path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(["args[0]"]),
         from_server: bool = False,
         configuration: typing.Optional[Configuration] = None,
         seen_classes: typing.FrozenSet[typing.Type] = frozenset(),
-        validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]] = frozendict.frozendict()
+        validated_path_to_schemas: typing.Dict[
+            typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]
+        ] = frozendict.frozendict(),
     ):
         """
         Args:
@@ -116,7 +117,7 @@ def __new__(
             from_server=from_server,
             configuration=configuration,
             seen_classes=seen_classes,
-            validated_path_to_schemas=validated_path_to_schemas
+            validated_path_to_schemas=validated_path_to_schemas,
         )
 
     def validation_ran_earlier(self, cls: type) -> bool:
@@ -130,23 +131,27 @@ def validation_ran_earlier(self, cls: type) -> bool:
 
     @property
     def path_to_item(self) -> typing.Tuple[typing.Union[str, int], ...]:
-        return self.get('path_to_item')
+        return self.get("path_to_item")
 
     @property
     def from_server(self) -> bool:
-        return self.get('from_server')
+        return self.get("from_server")
 
     @property
     def configuration(self) -> typing.Optional[Configuration]:
-        return self.get('configuration')
+        return self.get("configuration")
 
     @property
     def seen_classes(self) -> typing.FrozenSet[typing.Type]:
-        return self.get('seen_classes')
+        return self.get("seen_classes")
 
     @property
-    def validated_path_to_schemas(self) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]]:
-        return self.get('validated_path_to_schemas')
+    def validated_path_to_schemas(
+        self,
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]
+    ]:
+        return self.get("validated_path_to_schemas")
 
 
 class Singleton:
@@ -154,6 +159,7 @@ class Singleton:
     Enums and singletons are the same
     The same instance is returned for a given key of (cls, arg)
     """
+
     _instances = {}
 
     def __new__(cls, arg: typing.Any, **kwargs):
@@ -177,12 +183,12 @@ def __new__(cls, arg: typing.Any, **kwargs):
 
     def __repr__(self):
         if isinstance(self, NoneClass):
-            return f'<{self.__class__.__name__}: None>'
+            return f"<{self.__class__.__name__}: None>"
         elif isinstance(self, BoolClass):
             if bool(self):
-                return f'<{self.__class__.__name__}: True>'
-            return f'<{self.__class__.__name__}: False>'
-        return f'<{self.__class__.__name__}: {super().__repr__()}>'
+                return f"<{self.__class__.__name__}: True>"
+            return f"<{self.__class__.__name__}: False>"
+        return f"<{self.__class__.__name__}: {super().__repr__()}>"
 
 
 class classproperty:
@@ -217,7 +223,7 @@ def __bool__(self) -> bool:
         for key, instance in self._instances.items():
             if self is instance:
                 return bool(key[1])
-        raise ValueError('Unable to find the boolean value of this instance')
+        raise ValueError("Unable to find the boolean value of this instance")
 
 
 class MetaOapgTyped:
@@ -227,29 +233,39 @@ class MetaOapgTyped:
     inclusive_minimum: typing.Union[int, float]
     max_items: int
     min_items: int
-    discriminator: typing.Dict[str, typing.Dict[str, typing.Type['Schema']]]
+    discriminator: typing.Dict[str, typing.Dict[str, typing.Type["Schema"]]]
 
     class properties:
         # to hold object properties
         pass
 
-    additional_properties: typing.Optional[typing.Type['Schema']]
+    additional_properties: typing.Optional[typing.Type["Schema"]]
     max_properties: int
     min_properties: int
-    all_of: typing.List[typing.Type['Schema']]
-    one_of: typing.List[typing.Type['Schema']]
-    any_of: typing.List[typing.Type['Schema']]
-    not_schema: typing.Type['Schema']
+    all_of: typing.List[typing.Type["Schema"]]
+    one_of: typing.List[typing.Type["Schema"]]
+    any_of: typing.List[typing.Type["Schema"]]
+    not_schema: typing.Type["Schema"]
     max_length: int
     min_length: int
-    items: typing.Type['Schema']
+    items: typing.Type["Schema"]
 
 
 class Schema:
     """
     the base class of all swagger/openapi schemas/models
     """
-    __inheritable_primitive_types_set = {decimal.Decimal, str, tuple, frozendict.frozendict, FileIO, bytes, BoolClass, NoneClass}
+
+    __inheritable_primitive_types_set = {
+        decimal.Decimal,
+        str,
+        tuple,
+        frozendict.frozendict,
+        FileIO,
+        bytes,
+        BoolClass,
+        NoneClass,
+    }
     _types: typing.Set[typing.Type]
     MetaOapg = MetaOapgTyped
 
@@ -264,7 +280,9 @@ def __get_valid_classes_phrase(input_classes):
         return "is one of [{0}]".format(", ".join(all_class_names))
 
     @staticmethod
-    def _get_class_oapg(item_cls: typing.Union[types.FunctionType, staticmethod, typing.Type['Schema']]) -> typing.Type['Schema']:
+    def _get_class_oapg(
+        item_cls: typing.Union[types.FunctionType, staticmethod, typing.Type["Schema"]],
+    ) -> typing.Type["Schema"]:
         if isinstance(item_cls, types.FunctionType):
             # referenced schema
             return item_cls()
@@ -291,8 +309,7 @@ def __type_error_message(
         if key_type:
             key_or_value = "key"
         valid_classes_phrase = cls.__get_valid_classes_phrase(valid_classes)
-        msg = "Invalid type. Required {1} type {2} and " "passed type was {3}".format(
-            var_name,
+        msg = "Invalid type. Required {0} type {1} and passed type was {2}".format(
             key_or_value,
             valid_classes_phrase,
             type(var_value).__name__,
@@ -319,7 +336,20 @@ def _validate_oapg(
         cls,
         arg,
         validation_metadata: ValidationMetadata,
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]:
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ]:
         """
         Schema _validate_oapg
         All keyword validation except for type checking was done in calling stack frames
@@ -348,7 +378,17 @@ def _validate_oapg(
 
     @staticmethod
     def _process_schema_classes_oapg(
-        schema_classes: typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]
+        schema_classes: typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
     ):
         """
         Processes and mutates schema_classes
@@ -368,10 +408,8 @@ def _process_schema_classes_oapg(
 
     @classmethod
     def __get_new_cls(
-        cls,
-        arg,
-        validation_metadata: ValidationMetadata
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]:
+        cls, arg, validation_metadata: ValidationMetadata
+    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]]:
         """
         Make a new dynamic class and return an instance of that class
         We are making an instance of cls, but instead of making cls
@@ -395,7 +433,9 @@ def __get_new_cls(
         if validation_metadata.validated_path_to_schemas:
             update(_path_to_schemas, validation_metadata.validated_path_to_schemas)
         if not validation_metadata.validation_ran_earlier(cls):
-            other_path_to_schemas = cls._validate_oapg(arg, validation_metadata=validation_metadata)
+            other_path_to_schemas = cls._validate_oapg(
+                arg, validation_metadata=validation_metadata
+            )
             update(_path_to_schemas, other_path_to_schemas)
         # loop through it make a new class for each entry
         # do not modify the returned result because it is cached and we would be modifying the cached value
@@ -411,15 +451,21 @@ def __get_new_cls(
             """
             cls._process_schema_classes_oapg(schema_classes)
             enum_schema = any(
-                issubclass(this_cls, EnumBase) for this_cls in schema_classes)
-            inheritable_primitive_type = schema_classes.intersection(cls.__inheritable_primitive_types_set)
+                issubclass(this_cls, EnumBase) for this_cls in schema_classes
+            )
+            inheritable_primitive_type = schema_classes.intersection(
+                cls.__inheritable_primitive_types_set
+            )
             chosen_schema_classes = schema_classes - inheritable_primitive_type
             suffix = tuple(inheritable_primitive_type)
             if enum_schema and suffix[0] not in {NoneClass, BoolClass}:
                 suffix = (Singleton,) + suffix
 
-            used_classes = tuple(sorted(chosen_schema_classes, key=lambda a_cls: a_cls.__name__)) + suffix
-            mfg_cls = get_new_class(class_name='DynamicSchema', bases=used_classes)
+            used_classes = (
+                tuple(sorted(chosen_schema_classes, key=lambda a_cls: a_cls.__name__))
+                + suffix
+            )
+            mfg_cls = get_new_class(class_name="DynamicSchema", bases=used_classes)
             path_to_schemas[path] = mfg_cls
 
         return path_to_schemas
@@ -429,7 +475,9 @@ def _get_new_instance_without_conversion_oapg(
         cls,
         arg: typing.Any,
         path_to_item: typing.Tuple[typing.Union[str, int], ...],
-        path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]
+        path_to_schemas: typing.Dict[
+            typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]
+        ],
     ):
         # We have a Dynamic class and we are making an instance of it
         if issubclass(cls, frozendict.frozendict) and issubclass(cls, DictBase):
@@ -458,16 +506,16 @@ def from_openapi_data_oapg(
             decimal.Decimal,
             bool,
             None,
-            'Schema',
+            "Schema",
             dict,
             frozendict.frozendict,
             tuple,
             list,
             io.FileIO,
             io.BufferedReader,
-            bytes
+            bytes,
         ],
-        _configuration: typing.Optional[Configuration]
+        _configuration: typing.Optional[Configuration],
     ):
         """
         Schema from_openapi_data_oapg
@@ -476,13 +524,14 @@ def from_openapi_data_oapg(
         validated_path_to_schemas = {}
         arg = cast_to_allowed_types(arg, from_server, validated_path_to_schemas)
         validation_metadata = ValidationMetadata(
-            from_server=from_server, configuration=_configuration, validated_path_to_schemas=validated_path_to_schemas)
+            from_server=from_server,
+            configuration=_configuration,
+            validated_path_to_schemas=validated_path_to_schemas,
+        )
         path_to_schemas = cls.__get_new_cls(arg, validation_metadata)
         new_cls = path_to_schemas[validation_metadata.path_to_item]
         new_inst = new_cls._get_new_instance_without_conversion_oapg(
-            arg,
-            validation_metadata.path_to_item,
-            path_to_schemas
+            arg, validation_metadata.path_to_item, path_to_schemas
         )
         return new_inst
 
@@ -499,7 +548,41 @@ def __get_input_dict(*args, **kwargs) -> frozendict.frozendict:
     def __remove_unsets(kwargs):
         return {key: val for key, val in kwargs.items() if val is not unset}
 
-    def __new__(cls, *args: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'], _configuration: typing.Optional[Configuration] = None, **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset]):
+    def __new__(
+        cls,
+        *args: typing.Union[
+            dict,
+            frozendict.frozendict,
+            list,
+            tuple,
+            decimal.Decimal,
+            float,
+            int,
+            str,
+            date,
+            datetime,
+            bool,
+            None,
+            "Schema",
+        ],
+        _configuration: typing.Optional[Configuration] = None,
+        **kwargs: typing.Union[
+            dict,
+            frozendict.frozendict,
+            list,
+            tuple,
+            decimal.Decimal,
+            float,
+            int,
+            str,
+            date,
+            datetime,
+            bool,
+            None,
+            "Schema",
+            Unset,
+        ],
+    ):
         """
         Schema __new__
 
@@ -514,35 +597,59 @@ def __new__(cls, *args: typing.Union[dict, frozendict.frozendict, list, tuple, d
         """
         __kwargs = cls.__remove_unsets(kwargs)
         if not args and not __kwargs:
-            raise TypeError(
-                'No input given. args or kwargs must be given.'
-            )
+            raise TypeError("No input given. args or kwargs must be given.")
         if not __kwargs and args and not isinstance(args[0], dict):
             __arg = args[0]
         else:
             __arg = cls.__get_input_dict(*args, **__kwargs)
         __from_server = False
         __validated_path_to_schemas = {}
-        __arg = cast_to_allowed_types(
-            __arg, __from_server, __validated_path_to_schemas)
+        __arg = cast_to_allowed_types(__arg, __from_server, __validated_path_to_schemas)
         __validation_metadata = ValidationMetadata(
-            configuration=_configuration, from_server=__from_server, validated_path_to_schemas=__validated_path_to_schemas)
+            configuration=_configuration,
+            from_server=__from_server,
+            validated_path_to_schemas=__validated_path_to_schemas,
+        )
         __path_to_schemas = cls.__get_new_cls(__arg, __validation_metadata)
         __new_cls = __path_to_schemas[__validation_metadata.path_to_item]
         return __new_cls._get_new_instance_without_conversion_oapg(
-            __arg,
-            __validation_metadata.path_to_item,
-            __path_to_schemas
+            __arg, __validation_metadata.path_to_item, __path_to_schemas
         )
 
     def __init__(
         self,
         *args: typing.Union[
-            dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'],
+            dict,
+            frozendict.frozendict,
+            list,
+            tuple,
+            decimal.Decimal,
+            float,
+            int,
+            str,
+            date,
+            datetime,
+            bool,
+            None,
+            "Schema",
+        ],
         _configuration: typing.Optional[Configuration] = None,
         **kwargs: typing.Union[
-            dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset
-        ]
+            dict,
+            frozendict.frozendict,
+            list,
+            tuple,
+            decimal.Decimal,
+            float,
+            int,
+            str,
+            date,
+            datetime,
+            bool,
+            None,
+            "Schema",
+            Unset,
+        ],
     ):
         """
         this is needed to fix 'Unexpected argument' warning in pycharm
@@ -631,6 +738,7 @@ class StrBoolMixin(str, BoolClass):
 
     class DecimalBoolMixin(decimal.Decimal, BoolClass):
         pass
+
     # qty 3
 
     class NoneFrozenDictTupleMixin(NoneClass, frozendict.frozendict, tuple):
@@ -692,24 +800,33 @@ class TupleDecimalBoolMixin(tuple, decimal.Decimal, BoolClass):
 
     class StrDecimalBoolMixin(str, decimal.Decimal, BoolClass):
         pass
+
     # qty 4
 
     class NoneFrozenDictTupleStrMixin(NoneClass, frozendict.frozendict, tuple, str):
         pass
 
-    class NoneFrozenDictTupleDecimalMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal):
+    class NoneFrozenDictTupleDecimalMixin(
+        NoneClass, frozendict.frozendict, tuple, decimal.Decimal
+    ):
         pass
 
-    class NoneFrozenDictTupleBoolMixin(NoneClass, frozendict.frozendict, tuple, BoolClass):
+    class NoneFrozenDictTupleBoolMixin(
+        NoneClass, frozendict.frozendict, tuple, BoolClass
+    ):
         pass
 
-    class NoneFrozenDictStrDecimalMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal):
+    class NoneFrozenDictStrDecimalMixin(
+        NoneClass, frozendict.frozendict, str, decimal.Decimal
+    ):
         pass
 
     class NoneFrozenDictStrBoolMixin(NoneClass, frozendict.frozendict, str, BoolClass):
         pass
 
-    class NoneFrozenDictDecimalBoolMixin(NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass):
+    class NoneFrozenDictDecimalBoolMixin(
+        NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass
+    ):
         pass
 
     class NoneTupleStrDecimalMixin(NoneClass, tuple, str, decimal.Decimal):
@@ -724,47 +841,80 @@ class NoneTupleDecimalBoolMixin(NoneClass, tuple, decimal.Decimal, BoolClass):
     class NoneStrDecimalBoolMixin(NoneClass, str, decimal.Decimal, BoolClass):
         pass
 
-    class FrozenDictTupleStrDecimalMixin(frozendict.frozendict, tuple, str, decimal.Decimal):
+    class FrozenDictTupleStrDecimalMixin(
+        frozendict.frozendict, tuple, str, decimal.Decimal
+    ):
         pass
 
     class FrozenDictTupleStrBoolMixin(frozendict.frozendict, tuple, str, BoolClass):
         pass
 
-    class FrozenDictTupleDecimalBoolMixin(frozendict.frozendict, tuple, decimal.Decimal, BoolClass):
+    class FrozenDictTupleDecimalBoolMixin(
+        frozendict.frozendict, tuple, decimal.Decimal, BoolClass
+    ):
         pass
 
-    class FrozenDictStrDecimalBoolMixin(frozendict.frozendict, str, decimal.Decimal, BoolClass):
+    class FrozenDictStrDecimalBoolMixin(
+        frozendict.frozendict, str, decimal.Decimal, BoolClass
+    ):
         pass
 
     class TupleStrDecimalBoolMixin(tuple, str, decimal.Decimal, BoolClass):
         pass
+
     # qty 5
 
-    class NoneFrozenDictTupleStrDecimalMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal):
+    class NoneFrozenDictTupleStrDecimalMixin(
+        NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal
+    ):
         pass
 
-    class NoneFrozenDictTupleStrBoolMixin(NoneClass, frozendict.frozendict, tuple, str, BoolClass):
+    class NoneFrozenDictTupleStrBoolMixin(
+        NoneClass, frozendict.frozendict, tuple, str, BoolClass
+    ):
         pass
 
-    class NoneFrozenDictTupleDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass):
+    class NoneFrozenDictTupleDecimalBoolMixin(
+        NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass
+    ):
         pass
 
-    class NoneFrozenDictStrDecimalBoolMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass):
+    class NoneFrozenDictStrDecimalBoolMixin(
+        NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass
+    ):
         pass
 
-    class NoneTupleStrDecimalBoolMixin(NoneClass, tuple, str, decimal.Decimal, BoolClass):
+    class NoneTupleStrDecimalBoolMixin(
+        NoneClass, tuple, str, decimal.Decimal, BoolClass
+    ):
         pass
 
-    class FrozenDictTupleStrDecimalBoolMixin(frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass):
+    class FrozenDictTupleStrDecimalBoolMixin(
+        frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass
+    ):
         pass
+
     # qty 6
 
-    class NoneFrozenDictTupleStrDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass):
+    class NoneFrozenDictTupleStrDecimalBoolMixin(
+        NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass
+    ):
         pass
+
     # qty 8
 
-    class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes):
+    class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin(
+        NoneClass,
+        frozendict.frozendict,
+        tuple,
+        str,
+        decimal.Decimal,
+        BoolClass,
+        FileIO,
+        bytes,
+    ):
         pass
+
 else:
     # qty 1
     class NoneMixin:
@@ -790,6 +940,7 @@ class BytesMixin:
 
     class FileMixin:
         _types = {FileIO}
+
     # qty 2
 
     class BinaryMixin:
@@ -839,6 +990,7 @@ class StrBoolMixin:
 
     class DecimalBoolMixin:
         _types = {decimal.Decimal, BoolClass}
+
     # qty 3
 
     class NoneFrozenDictTupleMixin:
@@ -900,6 +1052,7 @@ class TupleDecimalBoolMixin:
 
     class StrDecimalBoolMixin:
         _types = {str, decimal.Decimal, BoolClass}
+
     # qty 4
 
     class NoneFrozenDictTupleStrMixin:
@@ -946,6 +1099,7 @@ class FrozenDictStrDecimalBoolMixin:
 
     class TupleStrDecimalBoolMixin:
         _types = {tuple, str, decimal.Decimal, BoolClass}
+
     # qty 5
 
     class NoneFrozenDictTupleStrDecimalMixin:
@@ -965,14 +1119,32 @@ class NoneTupleStrDecimalBoolMixin:
 
     class FrozenDictTupleStrDecimalBoolMixin:
         _types = {frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass}
+
     # qty 6
 
     class NoneFrozenDictTupleStrDecimalBoolMixin:
-        _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass}
+        _types = {
+            NoneClass,
+            frozendict.frozendict,
+            tuple,
+            str,
+            decimal.Decimal,
+            BoolClass,
+        }
+
     # qty 8
 
     class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin:
-        _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes}
+        _types = {
+            NoneClass,
+            frozendict.frozendict,
+            tuple,
+            str,
+            decimal.Decimal,
+            BoolClass,
+            FileIO,
+            bytes,
+        }
 
 
 class ValidatorBase:
@@ -989,12 +1161,16 @@ def _is_json_validation_enabled_oapg(schema_keyword, configuration=None):
             configuration (Configuration): the configuration class.
         """
 
-        return (configuration is None or
-                not hasattr(configuration, '_disabled_client_side_validations') or
-                schema_keyword not in configuration._disabled_client_side_validations)
+        return (
+            configuration is None
+            or not hasattr(configuration, "_disabled_client_side_validations")
+            or schema_keyword not in configuration._disabled_client_side_validations
+        )
 
     @staticmethod
-    def _raise_validation_errror_message_oapg(value, constraint_msg, constraint_value, path_to_item, additional_txt=""):
+    def _raise_validation_errror_message_oapg(
+        value, constraint_msg, constraint_value, path_to_item, additional_txt=""
+    ):
         raise ApiValueError(
             "Invalid value `{value}`, {constraint_msg} `{constraint_value}`{additional_txt} at {path_to_item}".format(
                 value=value,
@@ -1012,7 +1188,20 @@ def _validate_oapg(
         cls,
         arg,
         validation_metadata: ValidationMetadata,
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]:
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ]:
         """
         EnumBase _validate_oapg
         Validates that arg is in the enum's allowed values
@@ -1020,7 +1209,11 @@ def _validate_oapg(
         try:
             cls.MetaOapg.enum_value_to_name[arg]
         except KeyError:
-            raise ApiValueError("Invalid value {} passed in to {}, allowed_values={}".format(arg, cls, cls.MetaOapg.enum_value_to_name.keys()))
+            raise ApiValueError(
+                "Invalid value {} passed in to {}, allowed_values={}".format(
+                    arg, cls, cls.MetaOapg.enum_value_to_name.keys()
+                )
+            )
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
 
@@ -1064,68 +1257,73 @@ def as_str_oapg(self) -> str:
 
     @property
     def as_date_oapg(self) -> date:
-        raise Exception('not implemented')
+        raise Exception("not implemented")
 
     @property
     def as_datetime_oapg(self) -> datetime:
-        raise Exception('not implemented')
+        raise Exception("not implemented")
 
     @property
     def as_decimal_oapg(self) -> decimal.Decimal:
-        raise Exception('not implemented')
+        raise Exception("not implemented")
 
     @property
     def as_uuid_oapg(self) -> uuid.UUID:
-        raise Exception('not implemented')
+        raise Exception("not implemented")
 
     @classmethod
-    def __check_str_validations(
-        cls,
-        arg: str,
-        validation_metadata: ValidationMetadata
-    ):
-        if not hasattr(cls, 'MetaOapg'):
+    def __check_str_validations(cls, arg: str, validation_metadata: ValidationMetadata):
+        if not hasattr(cls, "MetaOapg"):
             return
-        if (cls._is_json_validation_enabled_oapg('maxLength', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'max_length') and
-                len(arg) > cls.MetaOapg.max_length):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "maxLength", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "max_length")
+            and len(arg) > cls.MetaOapg.max_length
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="length must be less than or equal to",
                 constraint_value=cls.MetaOapg.max_length,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('minLength', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'min_length') and
-                len(arg) < cls.MetaOapg.min_length):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "minLength", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "min_length")
+            and len(arg) < cls.MetaOapg.min_length
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="length must be greater than or equal to",
                 constraint_value=cls.MetaOapg.min_length,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('pattern', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'regex')):
+        if cls._is_json_validation_enabled_oapg(
+            "pattern", validation_metadata.configuration
+        ) and hasattr(cls.MetaOapg, "regex"):
             for regex_dict in cls.MetaOapg.regex:
-                flags = regex_dict.get('flags', 0)
-                if not re.search(regex_dict['pattern'], arg, flags=flags):
+                flags = regex_dict.get("flags", 0)
+                if not re.search(regex_dict["pattern"], arg, flags=flags):
                     if flags != 0:
                         # Don't print the regex flags if the flags are not
                         # specified in the OAS document.
                         cls._raise_validation_errror_message_oapg(
                             value=arg,
                             constraint_msg="must match regular expression",
-                            constraint_value=regex_dict['pattern'],
+                            constraint_value=regex_dict["pattern"],
                             path_to_item=validation_metadata.path_to_item,
-                            additional_txt=" with flags=`{}`".format(flags)
+                            additional_txt=" with flags=`{}`".format(flags),
                         )
                     cls._raise_validation_errror_message_oapg(
                         value=arg,
                         constraint_msg="must match regular expression",
-                        constraint_value=regex_dict['pattern'],
-                        path_to_item=validation_metadata.path_to_item
+                        constraint_value=regex_dict["pattern"],
+                        path_to_item=validation_metadata.path_to_item,
                     )
 
     @classmethod
@@ -1133,7 +1331,20 @@ def _validate_oapg(
         cls,
         arg,
         validation_metadata: ValidationMetadata,
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]:
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ]:
         """
         StrBase _validate_oapg
         Validates that validations pass
@@ -1150,14 +1361,18 @@ def as_uuid_oapg(self) -> uuid.UUID:
         return uuid.UUID(self)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata
+    ):
         if isinstance(arg, str):
             try:
                 uuid.UUID(arg)
                 return True
             except ValueError:
                 raise ApiValueError(
-                    "Invalid value '{}' for type UUID at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type UUID at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -1179,17 +1394,17 @@ class CustomIsoparser(isoparser):
     def parse_isodatetime(self, dt_str):
         components, pos = self._parse_isodate(dt_str)
         if len(dt_str) > pos:
-            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
-                components += self._parse_isotime(dt_str[pos + 1:])
+            if self._sep is None or dt_str[pos : pos + 1] == self._sep:
+                components += self._parse_isotime(dt_str[pos + 1 :])
             else:
-                raise ValueError('String contains unknown ISO components')
+                raise ValueError("String contains unknown ISO components")
 
         if len(components) > 3 and components[3] == 24:
             components[3] = 0
             return datetime(*components) + timedelta(days=1)
 
         if len(components) <= 3:
-            raise ValueError('Value is not a datetime')
+            raise ValueError("Value is not a datetime")
 
         return datetime(*components)
 
@@ -1198,10 +1413,10 @@ def parse_isodate(self, datestr):
         components, pos = self._parse_isodate(datestr)
 
         if len(datestr) > pos:
-            raise ValueError('String contains invalid time components')
+            raise ValueError("String contains invalid time components")
 
         if len(components) > 3:
-            raise ValueError('String contains invalid time components')
+            raise ValueError("String contains invalid time components")
 
         return date(*components)
 
@@ -1216,7 +1431,9 @@ def as_date_oapg(self) -> date:
         return DEFAULT_ISOPARSER.parse_isodate(self)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata
+    ):
         if isinstance(arg, str):
             try:
                 DEFAULT_ISOPARSER.parse_isodate(arg)
@@ -1224,7 +1441,9 @@ def __validate_format(cls, arg: typing.Optional[str], validation_metadata: Valid
             except ValueError:
                 raise ApiValueError(
                     "Value does not conform to the required ISO-8601 date format. "
-                    "Invalid value '{}' for type date at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type date at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -1247,7 +1466,9 @@ def as_datetime_oapg(self) -> datetime:
         return DEFAULT_ISOPARSER.parse_isodatetime(self)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata
+    ):
         if isinstance(arg, str):
             try:
                 DEFAULT_ISOPARSER.parse_isodatetime(arg)
@@ -1255,7 +1476,9 @@ def __validate_format(cls, arg: typing.Optional[str], validation_metadata: Valid
             except ValueError:
                 raise ApiValueError(
                     "Value does not conform to the required ISO-8601 datetime format. "
-                    "Invalid value '{}' for type datetime at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type datetime at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -1284,7 +1507,9 @@ def as_decimal_oapg(self) -> decimal.Decimal:
         return decimal.Decimal(self)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls, arg: typing.Optional[str], validation_metadata: ValidationMetadata
+    ):
         if isinstance(arg, str):
             try:
                 decimal.Decimal(arg)
@@ -1292,7 +1517,9 @@ def __validate_format(cls, arg: typing.Optional[str], validation_metadata: Valid
             except decimal.InvalidOperation:
                 raise ApiValueError(
                     "Value cannot be converted to a decimal. "
-                    "Invalid value '{}' for type decimal at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type decimal at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -1327,7 +1554,7 @@ def as_int_oapg(self) -> int:
             if self.as_tuple().exponent < 0:
                 # this could be represented as an integer but should be represented as a float
                 # because that's what it was serialized from
-                raise ApiValueError(f'{self} is not an integer')
+                raise ApiValueError(f"{self} is not an integer")
             self._as_int = int(self)
             return self._as_int
 
@@ -1337,79 +1564,93 @@ def as_float_oapg(self) -> float:
             return self._as_float
         except AttributeError:
             if self.as_tuple().exponent >= 0:
-                raise ApiValueError(f'{self} is not an float')
+                raise ApiValueError(f"{self} is not an float")
             self._as_float = float(self)
             return self._as_float
 
     @classmethod
-    def __check_numeric_validations(
-        cls,
-        arg,
-        validation_metadata: ValidationMetadata
-    ):
-        if not hasattr(cls, 'MetaOapg'):
+    def __check_numeric_validations(cls, arg, validation_metadata: ValidationMetadata):
+        if not hasattr(cls, "MetaOapg"):
             return
-        if cls._is_json_validation_enabled_oapg('multipleOf',
-                                                validation_metadata.configuration) and hasattr(cls.MetaOapg, 'multiple_of'):
+        if cls._is_json_validation_enabled_oapg(
+            "multipleOf", validation_metadata.configuration
+        ) and hasattr(cls.MetaOapg, "multiple_of"):
             multiple_of_value = cls.MetaOapg.multiple_of
-            if (not (float(arg) / multiple_of_value).is_integer()):
+            if not (float(arg) / multiple_of_value).is_integer():
                 # Note 'multipleOf' will be as good as the floating point arithmetic.
                 cls._raise_validation_errror_message_oapg(
                     value=arg,
                     constraint_msg="value must be a multiple of",
                     constraint_value=multiple_of_value,
-                    path_to_item=validation_metadata.path_to_item
+                    path_to_item=validation_metadata.path_to_item,
                 )
 
         checking_max_or_min_values = any(
-            hasattr(cls.MetaOapg, validation_key) for validation_key in {
-                'exclusive_maximum',
-                'inclusive_maximum',
-                'exclusive_minimum',
-                'inclusive_minimum',
+            hasattr(cls.MetaOapg, validation_key)
+            for validation_key in {
+                "exclusive_maximum",
+                "inclusive_maximum",
+                "exclusive_minimum",
+                "inclusive_minimum",
             }
         )
         if not checking_max_or_min_values:
             return
 
-        if (cls._is_json_validation_enabled_oapg('exclusiveMaximum', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'exclusive_maximum') and
-                arg >= cls.MetaOapg.exclusive_maximum):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "exclusiveMaximum", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "exclusive_maximum")
+            and arg >= cls.MetaOapg.exclusive_maximum
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="must be a value less than",
                 constraint_value=cls.MetaOapg.exclusive_maximum,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('maximum', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'inclusive_maximum') and
-                arg > cls.MetaOapg.inclusive_maximum):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "maximum", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "inclusive_maximum")
+            and arg > cls.MetaOapg.inclusive_maximum
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="must be a value less than or equal to",
                 constraint_value=cls.MetaOapg.inclusive_maximum,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('exclusiveMinimum', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'exclusive_minimum') and
-                arg <= cls.MetaOapg.exclusive_minimum):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "exclusiveMinimum", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "exclusive_minimum")
+            and arg <= cls.MetaOapg.exclusive_minimum
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="must be a value greater than",
                 constraint_value=cls.MetaOapg.exclusive_maximum,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('minimum', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'inclusive_minimum') and
-                arg < cls.MetaOapg.inclusive_minimum):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "minimum", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "inclusive_minimum")
+            and arg < cls.MetaOapg.inclusive_minimum
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="must be a value greater than or equal to",
                 constraint_value=cls.MetaOapg.inclusive_minimum,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
     @classmethod
@@ -1417,7 +1658,20 @@ def _validate_oapg(
         cls,
         arg,
         validation_metadata: ValidationMetadata,
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]:
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ]:
         """
         NumberBase _validate_oapg
         Validates that validations pass
@@ -1447,58 +1701,71 @@ def __validate_items(cls, list_items, validation_metadata: ValidationMetadata):
 
         # if we have definitions for an items schema, use it
         # otherwise accept anything
-        item_cls = getattr(cls.MetaOapg, 'items', UnsetAnyTypeSchema)
+        item_cls = getattr(cls.MetaOapg, "items", UnsetAnyTypeSchema)
         item_cls = cls._get_class_oapg(item_cls)
         path_to_schemas = {}
         for i, value in enumerate(list_items):
             item_validation_metadata = ValidationMetadata(
                 from_server=validation_metadata.from_server,
                 configuration=validation_metadata.configuration,
-                path_to_item=validation_metadata.path_to_item+(i,),
-                validated_path_to_schemas=validation_metadata.validated_path_to_schemas
+                path_to_item=validation_metadata.path_to_item + (i,),
+                validated_path_to_schemas=validation_metadata.validated_path_to_schemas,
             )
             if item_validation_metadata.validation_ran_earlier(item_cls):
                 continue
             other_path_to_schemas = item_cls._validate_oapg(
-                value, validation_metadata=item_validation_metadata)
+                value, validation_metadata=item_validation_metadata
+            )
             update(path_to_schemas, other_path_to_schemas)
         return path_to_schemas
 
     @classmethod
-    def __check_tuple_validations(
-            cls, arg,
-            validation_metadata: ValidationMetadata):
-        if not hasattr(cls, 'MetaOapg'):
+    def __check_tuple_validations(cls, arg, validation_metadata: ValidationMetadata):
+        if not hasattr(cls, "MetaOapg"):
             return
-        if (cls._is_json_validation_enabled_oapg('maxItems', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'max_items') and
-                len(arg) > cls.MetaOapg.max_items):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "maxItems", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "max_items")
+            and len(arg) > cls.MetaOapg.max_items
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="number of items must be less than or equal to",
                 constraint_value=cls.MetaOapg.max_items,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('minItems', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'min_items') and
-                len(arg) < cls.MetaOapg.min_items):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "minItems", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "min_items")
+            and len(arg) < cls.MetaOapg.min_items
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="number of items must be greater than or equal to",
                 constraint_value=cls.MetaOapg.min_items,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('uniqueItems', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'unique_items') and cls.MetaOapg.unique_items and arg):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "uniqueItems", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "unique_items")
+            and cls.MetaOapg.unique_items
+            and arg
+        ):
             unique_items = set(arg)
             if len(arg) > len(unique_items):
                 cls._raise_validation_errror_message_oapg(
                     value=arg,
                     constraint_msg="duplicate items were found, and the tuple must not contain duplicates because",
-                    constraint_value='unique_items==True',
-                    path_to_item=validation_metadata.path_to_item
+                    constraint_value="unique_items==True",
+                    path_to_item=validation_metadata.path_to_item,
                 )
 
     @classmethod
@@ -1524,7 +1791,9 @@ def _validate_oapg(
         """
         if isinstance(arg, tuple):
             cls.__check_tuple_validations(arg, validation_metadata)
-        _path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata)
+        _path_to_schemas = super()._validate_oapg(
+            arg, validation_metadata=validation_metadata
+        )
         if not isinstance(arg, tuple):
             return _path_to_schemas
         updated_vm = ValidationMetadata(
@@ -1532,31 +1801,33 @@ def _validate_oapg(
             from_server=validation_metadata.from_server,
             path_to_item=validation_metadata.path_to_item,
             seen_classes=validation_metadata.seen_classes | frozenset({cls}),
-            validated_path_to_schemas=validation_metadata.validated_path_to_schemas
+            validated_path_to_schemas=validation_metadata.validated_path_to_schemas,
+        )
+        other_path_to_schemas = cls.__validate_items(
+            arg, validation_metadata=updated_vm
         )
-        other_path_to_schemas = cls.__validate_items(arg, validation_metadata=updated_vm)
         update(_path_to_schemas, other_path_to_schemas)
         return _path_to_schemas
 
     @classmethod
     def _get_items_oapg(
-        cls: 'Schema',
+        cls: "Schema",
         arg: typing.List[typing.Any],
         path_to_item: typing.Tuple[typing.Union[str, int], ...],
-        path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]
+        path_to_schemas: typing.Dict[
+            typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]
+        ],
     ):
-        '''
+        """
         ListBase _get_items_oapg
-        '''
+        """
         cast_items = []
 
         for i, value in enumerate(arg):
             item_path_to_item = path_to_item + (i,)
             item_cls = path_to_schemas[item_path_to_item]
             new_value = item_cls._get_new_instance_without_conversion_oapg(
-                value,
-                item_path_to_item,
-                path_to_schemas
+                value, item_path_to_item, path_to_schemas
             )
             cast_items.append(new_value)
 
@@ -1567,20 +1838,26 @@ class Discriminable:
     MetaOapg: MetaOapgTyped
 
     @classmethod
-    def _ensure_discriminator_value_present_oapg(cls, disc_property_name: str, validation_metadata: ValidationMetadata, *args):
+    def _ensure_discriminator_value_present_oapg(
+        cls, disc_property_name: str, validation_metadata: ValidationMetadata, *args
+    ):
         if not args or args and disc_property_name not in args[0]:
             # The input data does not contain the discriminator property
             raise ApiValueError(
                 "Cannot deserialize input data due to missing discriminator. "
-                "The discriminator property '{}' is missing at path: {}".format(disc_property_name, validation_metadata.path_to_item)
+                "The discriminator property '{}' is missing at path: {}".format(
+                    disc_property_name, validation_metadata.path_to_item
+                )
             )
 
     @classmethod
-    def get_discriminated_class_oapg(cls, disc_property_name: str, disc_payload_value: str):
+    def get_discriminated_class_oapg(
+        cls, disc_property_name: str, disc_payload_value: str
+    ):
         """
         Used in schemas with discriminators
         """
-        if not hasattr(cls.MetaOapg, 'discriminator'):
+        if not hasattr(cls.MetaOapg, "discriminator"):
             return None
         disc = cls.MetaOapg.discriminator()
         if disc_property_name not in disc:
@@ -1588,31 +1865,37 @@ def get_discriminated_class_oapg(cls, disc_property_name: str, disc_payload_valu
         discriminated_cls = disc[disc_property_name].get(disc_payload_value)
         if discriminated_cls is not None:
             return discriminated_cls
-        if not hasattr(cls, 'MetaOapg'):
+        if not hasattr(cls, "MetaOapg"):
             return None
         elif not (
-            hasattr(cls.MetaOapg, 'all_of') or
-            hasattr(cls.MetaOapg, 'one_of') or
-            hasattr(cls.MetaOapg, 'any_of')
+            hasattr(cls.MetaOapg, "all_of")
+            or hasattr(cls.MetaOapg, "one_of")
+            or hasattr(cls.MetaOapg, "any_of")
         ):
             return None
         # TODO stop traveling if a cycle is hit
-        if hasattr(cls.MetaOapg, 'all_of'):
+        if hasattr(cls.MetaOapg, "all_of"):
             for allof_cls in cls.MetaOapg.all_of():
                 discriminated_cls = allof_cls.get_discriminated_class_oapg(
-                    disc_property_name=disc_property_name, disc_payload_value=disc_payload_value)
+                    disc_property_name=disc_property_name,
+                    disc_payload_value=disc_payload_value,
+                )
                 if discriminated_cls is not None:
                     return discriminated_cls
-        if hasattr(cls.MetaOapg, 'one_of'):
+        if hasattr(cls.MetaOapg, "one_of"):
             for oneof_cls in cls.MetaOapg.one_of():
                 discriminated_cls = oneof_cls.get_discriminated_class_oapg(
-                    disc_property_name=disc_property_name, disc_payload_value=disc_payload_value)
+                    disc_property_name=disc_property_name,
+                    disc_payload_value=disc_payload_value,
+                )
                 if discriminated_cls is not None:
                     return discriminated_cls
-        if hasattr(cls.MetaOapg, 'any_of'):
+        if hasattr(cls.MetaOapg, "any_of"):
             for anyof_cls in cls.MetaOapg.any_of():
                 discriminated_cls = anyof_cls.get_discriminated_class_oapg(
-                    disc_property_name=disc_property_name, disc_payload_value=disc_payload_value)
+                    disc_property_name=disc_property_name,
+                    disc_payload_value=disc_payload_value,
+                )
                 if discriminated_cls is not None:
                     return discriminated_cls
         return None
@@ -1642,10 +1925,12 @@ def __validate_arg_presence(cls, arg):
         """
         seen_required_properties = set()
         invalid_arguments = []
-        required_property_names = getattr(cls.MetaOapg, 'required', set())
-        additional_properties = getattr(cls.MetaOapg, 'additional_properties', UnsetAnyTypeSchema)
-        properties = getattr(cls.MetaOapg, 'properties', {})
-        property_annotations = getattr(properties, '__annotations__', {})
+        required_property_names = getattr(cls.MetaOapg, "required", set())
+        additional_properties = getattr(
+            cls.MetaOapg, "additional_properties", UnsetAnyTypeSchema
+        )
+        properties = getattr(cls.MetaOapg, "properties", {})
+        property_annotations = getattr(properties, "__annotations__", {})
         for property_name in arg:
             if property_name in required_property_names:
                 seen_required_properties.add(property_name)
@@ -1655,7 +1940,9 @@ def __validate_arg_presence(cls, arg):
                 continue
             else:
                 invalid_arguments.append(property_name)
-        missing_required_arguments = list(required_property_names - seen_required_properties)
+        missing_required_arguments = list(
+            required_property_names - seen_required_properties
+        )
         if missing_required_arguments:
             missing_required_arguments.sort()
             raise ApiTypeError(
@@ -1663,7 +1950,7 @@ def __validate_arg_presence(cls, arg):
                     cls.__name__,
                     len(missing_required_arguments),
                     "s" if len(missing_required_arguments) > 1 else "",
-                    missing_required_arguments
+                    missing_required_arguments,
                 )
             )
         if invalid_arguments:
@@ -1673,7 +1960,7 @@ def __validate_arg_presence(cls, arg):
                     cls.__name__,
                     len(invalid_arguments),
                     "s" if len(invalid_arguments) > 1 else "",
-                    invalid_arguments
+                    invalid_arguments,
                 )
             )
 
@@ -1692,11 +1979,13 @@ def __validate_args(cls, arg, validation_metadata: ValidationMetadata):
             ApiTypeError - for missing required arguments, or for invalid properties
         """
         path_to_schemas = {}
-        additional_properties = getattr(cls.MetaOapg, 'additional_properties', UnsetAnyTypeSchema)
-        properties = getattr(cls.MetaOapg, 'properties', {})
-        property_annotations = getattr(properties, '__annotations__', {})
+        additional_properties = getattr(
+            cls.MetaOapg, "additional_properties", UnsetAnyTypeSchema
+        )
+        properties = getattr(cls.MetaOapg, "properties", {})
+        property_annotations = getattr(properties, "__annotations__", {})
         for property_name, value in arg.items():
-            path_to_item = validation_metadata.path_to_item+(property_name,)
+            path_to_item = validation_metadata.path_to_item + (property_name,)
             if property_name in property_annotations:
                 schema = property_annotations[property_name]
             elif additional_properties is not NotAnyTypeSchema:
@@ -1710,48 +1999,56 @@ def __validate_args(cls, arg, validation_metadata: ValidationMetadata):
                         continue
                 schema = additional_properties
             else:
-                raise ApiTypeError('Unable to find schema for value={} in class={} at path_to_item={}'.format(
-                    value, cls, validation_metadata.path_to_item+(property_name,)
-                ))
+                raise ApiTypeError(
+                    "Unable to find schema for value={} in class={} at path_to_item={}".format(
+                        value, cls, validation_metadata.path_to_item + (property_name,)
+                    )
+                )
             schema = cls._get_class_oapg(schema)
             arg_validation_metadata = ValidationMetadata(
                 from_server=validation_metadata.from_server,
                 configuration=validation_metadata.configuration,
                 path_to_item=path_to_item,
-                validated_path_to_schemas=validation_metadata.validated_path_to_schemas
+                validated_path_to_schemas=validation_metadata.validated_path_to_schemas,
             )
             if arg_validation_metadata.validation_ran_earlier(schema):
                 continue
-            other_path_to_schemas = schema._validate_oapg(value, validation_metadata=arg_validation_metadata)
+            other_path_to_schemas = schema._validate_oapg(
+                value, validation_metadata=arg_validation_metadata
+            )
             update(path_to_schemas, other_path_to_schemas)
         return path_to_schemas
 
     @classmethod
-    def __check_dict_validations(
-        cls,
-        arg,
-        validation_metadata: ValidationMetadata
-    ):
-        if not hasattr(cls, 'MetaOapg'):
+    def __check_dict_validations(cls, arg, validation_metadata: ValidationMetadata):
+        if not hasattr(cls, "MetaOapg"):
             return
-        if (cls._is_json_validation_enabled_oapg('maxProperties', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'max_properties') and
-                len(arg) > cls.MetaOapg.max_properties):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "maxProperties", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "max_properties")
+            and len(arg) > cls.MetaOapg.max_properties
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="number of properties must be less than or equal to",
                 constraint_value=cls.MetaOapg.max_properties,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
-        if (cls._is_json_validation_enabled_oapg('minProperties', validation_metadata.configuration) and
-                hasattr(cls.MetaOapg, 'min_properties') and
-                len(arg) < cls.MetaOapg.min_properties):
+        if (
+            cls._is_json_validation_enabled_oapg(
+                "minProperties", validation_metadata.configuration
+            )
+            and hasattr(cls.MetaOapg, "min_properties")
+            and len(arg) < cls.MetaOapg.min_properties
+        ):
             cls._raise_validation_errror_message_oapg(
                 value=arg,
                 constraint_msg="number of properties must be greater than or equal to",
                 constraint_value=cls.MetaOapg.min_properties,
-                path_to_item=validation_metadata.path_to_item
+                path_to_item=validation_metadata.path_to_item,
             )
 
     @classmethod
@@ -1777,11 +2074,15 @@ def _validate_oapg(
         """
         if isinstance(arg, frozendict.frozendict):
             cls.__check_dict_validations(arg, validation_metadata)
-        _path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata)
+        _path_to_schemas = super()._validate_oapg(
+            arg, validation_metadata=validation_metadata
+        )
         if not isinstance(arg, frozendict.frozendict):
             return _path_to_schemas
         cls.__validate_arg_presence(arg)
-        other_path_to_schemas = cls.__validate_args(arg, validation_metadata=validation_metadata)
+        other_path_to_schemas = cls.__validate_args(
+            arg, validation_metadata=validation_metadata
+        )
         update(_path_to_schemas, other_path_to_schemas)
         try:
             discriminator = cls.MetaOapg.discriminator()
@@ -1789,16 +2090,19 @@ def _validate_oapg(
             return _path_to_schemas
         # discriminator exists
         disc_prop_name = list(discriminator.keys())[0]
-        cls._ensure_discriminator_value_present_oapg(disc_prop_name, validation_metadata, arg)
+        cls._ensure_discriminator_value_present_oapg(
+            disc_prop_name, validation_metadata, arg
+        )
         discriminated_cls = cls.get_discriminated_class_oapg(
-            disc_property_name=disc_prop_name, disc_payload_value=arg[disc_prop_name])
+            disc_property_name=disc_prop_name, disc_payload_value=arg[disc_prop_name]
+        )
         if discriminated_cls is None:
             raise ApiValueError(
                 "Invalid discriminator value was passed in to {}.{} Only the values {} are allowed at {}".format(
                     cls.__name__,
                     disc_prop_name,
                     list(discriminator[disc_prop_name].keys()),
-                    validation_metadata.path_to_item + (disc_prop_name,)
+                    validation_metadata.path_to_item + (disc_prop_name,),
                 )
             )
         updated_vm = ValidationMetadata(
@@ -1806,11 +2110,13 @@ def _validate_oapg(
             from_server=validation_metadata.from_server,
             path_to_item=validation_metadata.path_to_item,
             seen_classes=validation_metadata.seen_classes | frozenset({cls}),
-            validated_path_to_schemas=validation_metadata.validated_path_to_schemas
+            validated_path_to_schemas=validation_metadata.validated_path_to_schemas,
         )
         if updated_vm.validation_ran_earlier(discriminated_cls):
             return _path_to_schemas
-        other_path_to_schemas = discriminated_cls._validate_oapg(arg, validation_metadata=updated_vm)
+        other_path_to_schemas = discriminated_cls._validate_oapg(
+            arg, validation_metadata=updated_vm
+        )
         update(_path_to_schemas, other_path_to_schemas)
         return _path_to_schemas
 
@@ -1819,7 +2125,9 @@ def _get_properties_oapg(
         cls,
         arg: typing.Dict[str, typing.Any],
         path_to_item: typing.Tuple[typing.Union[str, int], ...],
-        path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]
+        path_to_schemas: typing.Dict[
+            typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]
+        ],
     ):
         """
         DictBase _get_properties_oapg, this is how properties are set
@@ -1831,9 +2139,7 @@ def _get_properties_oapg(
             property_path_to_item = path_to_item + (property_name_js,)
             property_cls = path_to_schemas[property_path_to_item]
             new_value = property_cls._get_new_instance_without_conversion_oapg(
-                value,
-                property_path_to_item,
-                path_to_schemas
+                value, property_path_to_item, path_to_schemas
             )
             dict_items[property_name_js] = new_value
 
@@ -1841,7 +2147,9 @@ def _get_properties_oapg(
 
     def __setattr__(self, name: str, value: typing.Any):
         if not isinstance(self, FileIO):
-            raise AttributeError('property setting not supported on immutable instances')
+            raise AttributeError(
+                "property setting not supported on immutable instances"
+            )
 
     def __getattr__(self, name: str):
         """
@@ -1868,7 +2176,7 @@ def __getitem__(self, name: str):
             return super().__getattr__(name)
         return super().__getitem__(name)
 
-    def get_item_oapg(self, name: str) -> typing.Union['AnyTypeSchema', Unset]:
+    def get_item_oapg(self, name: str) -> typing.Union["AnyTypeSchema", Unset]:
         # dict_instance[name] accessor
         if not isinstance(self, frozendict.frozendict):
             raise NotImplementedError()
@@ -1879,11 +2187,50 @@ def get_item_oapg(self, name: str) -> typing.Union['AnyTypeSchema', Unset]:
 
 
 def cast_to_allowed_types(
-    arg: typing.Union[str, date, datetime, uuid.UUID, decimal.Decimal, int, float, None, dict, frozendict.frozendict, list, tuple, bytes, Schema, io.FileIO, io.BufferedReader],
+    arg: typing.Union[
+        str,
+        date,
+        datetime,
+        uuid.UUID,
+        decimal.Decimal,
+        int,
+        float,
+        None,
+        dict,
+        frozendict.frozendict,
+        list,
+        tuple,
+        bytes,
+        Schema,
+        io.FileIO,
+        io.BufferedReader,
+    ],
     from_server: bool,
-    validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]],
-    path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(['args[0]']),
-) -> typing.Union[frozendict.frozendict, tuple, decimal.Decimal, str, bytes, BoolClass, NoneClass, FileIO]:
+    validated_path_to_schemas: typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ],
+    path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(["args[0]"]),
+) -> typing.Union[
+    frozendict.frozendict,
+    tuple,
+    decimal.Decimal,
+    str,
+    bytes,
+    BoolClass,
+    NoneClass,
+    FileIO,
+]:
     """
     Casts the input payload arg into the allowed types
     The input validated_path_to_schemas is mutated by running this function
@@ -1907,7 +2254,10 @@ def cast_to_allowed_types(
     if isinstance(arg, Schema):
         # store the already run validations
         schema_classes = set()
-        source_schema_was_unset = len(arg.__class__.__bases__) == 2 and UnsetAnyTypeSchema in arg.__class__.__bases__
+        source_schema_was_unset = (
+            len(arg.__class__.__bases__) == 2
+            and UnsetAnyTypeSchema in arg.__class__.__bases__
+        )
         if not source_schema_was_unset:
             """
             Do not include UnsetAnyTypeSchema and its base class because
@@ -1921,11 +2271,20 @@ def cast_to_allowed_types(
                 schema_classes.add(cls)
         validated_path_to_schemas[path_to_item] = schema_classes
 
-    type_error = ApiTypeError(f"Invalid type. Required value type is str and passed type was {type(arg)} at {path_to_item}")
+    type_error = ApiTypeError(
+        f"Invalid type. Required value type is str and passed type was {type(arg)} at {path_to_item}"
+    )
     if isinstance(arg, str):
         return str(arg)
     elif isinstance(arg, (dict, frozendict.frozendict)):
-        return frozendict.frozendict({key: cast_to_allowed_types(val, from_server, validated_path_to_schemas, path_to_item + (key,)) for key, val in arg.items()})
+        return frozendict.frozendict(
+            {
+                key: cast_to_allowed_types(
+                    val, from_server, validated_path_to_schemas, path_to_item + (key,)
+                )
+                for key, val in arg.items()
+            }
+        )
     elif isinstance(arg, (bool, BoolClass)):
         """
         this check must come before isinstance(arg, (int, float))
@@ -1941,10 +2300,17 @@ def cast_to_allowed_types(
         if decimal_from_float.as_integer_ratio()[1] == 1:
             # 9.0 -> Decimal('9.0')
             # 3.4028234663852886e+38 -> Decimal('340282346638528859811704183484516925440.0')
-            return decimal.Decimal(str(decimal_from_float)+'.0')
+            return decimal.Decimal(str(decimal_from_float) + ".0")
         return decimal_from_float
     elif isinstance(arg, (tuple, list)):
-        return tuple([cast_to_allowed_types(item, from_server, validated_path_to_schemas, path_to_item + (i,)) for i, item in enumerate(arg)])
+        return tuple(
+            [
+                cast_to_allowed_types(
+                    item, from_server, validated_path_to_schemas, path_to_item + (i,)
+                )
+                for i, item in enumerate(arg)
+            ]
+        )
     elif isinstance(arg, (none_type, NoneClass)):
         return NoneClass.NONE
     elif isinstance(arg, (date, datetime)):
@@ -1961,7 +2327,9 @@ def cast_to_allowed_types(
         return bytes(arg)
     elif isinstance(arg, (io.FileIO, io.BufferedReader)):
         return FileIO(arg)
-    raise ValueError('Invalid type passed in got input={} type={}'.format(arg, type(arg)))
+    raise ValueError(
+        "Invalid type passed in got input={} type={}".format(arg, type(arg))
+    )
 
 
 class ComposedBase(Discriminable):
@@ -1972,7 +2340,9 @@ def __get_allof_classes(cls, arg, validation_metadata: ValidationMetadata):
         for allof_cls in cls.MetaOapg.all_of():
             if validation_metadata.validation_ran_earlier(allof_cls):
                 continue
-            other_path_to_schemas = allof_cls._validate_oapg(arg, validation_metadata=validation_metadata)
+            other_path_to_schemas = allof_cls._validate_oapg(
+                arg, validation_metadata=validation_metadata
+            )
             update(path_to_schemas, other_path_to_schemas)
         return path_to_schemas
 
@@ -1993,7 +2363,9 @@ def __get_oneof_class(
                 oneof_classes.append(oneof_cls)
                 continue
             try:
-                path_to_schemas = oneof_cls._validate_oapg(arg, validation_metadata=validation_metadata)
+                path_to_schemas = oneof_cls._validate_oapg(
+                    arg, validation_metadata=validation_metadata
+                )
             except (ApiValueError, ApiTypeError) as ex:
                 if discriminated_cls is not None and oneof_cls is discriminated_cls:
                     raise ex
@@ -2007,17 +2379,16 @@ def __get_oneof_class(
         elif len(oneof_classes) > 1:
             raise ApiValueError(
                 "Invalid inputs given to generate an instance of {}. Multiple "
-                "oneOf schemas {} matched the inputs, but a max of one is allowed.".format(cls, oneof_classes)
+                "oneOf schemas {} matched the inputs, but a max of one is allowed.".format(
+                    cls, oneof_classes
+                )
             )
         # exactly one class matches
         return path_to_schemas
 
     @classmethod
     def __get_anyof_classes(
-        cls,
-        arg,
-        discriminated_cls,
-        validation_metadata: ValidationMetadata
+        cls, arg, discriminated_cls, validation_metadata: ValidationMetadata
     ):
         anyof_classes = []
         path_to_schemas = defaultdict(set)
@@ -2027,7 +2398,9 @@ def __get_anyof_classes(
                 continue
 
             try:
-                other_path_to_schemas = anyof_cls._validate_oapg(arg, validation_metadata=validation_metadata)
+                other_path_to_schemas = anyof_cls._validate_oapg(
+                    arg, validation_metadata=validation_metadata
+                )
             except (ApiValueError, ApiTypeError) as ex:
                 if discriminated_cls is not None and anyof_cls is discriminated_cls:
                     raise ex
@@ -2046,7 +2419,20 @@ def _validate_oapg(
         cls,
         arg,
         validation_metadata: ValidationMetadata,
-    ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]:
+    ) -> typing.Dict[
+        typing.Tuple[typing.Union[str, int], ...],
+        typing.Set[
+            typing.Union[
+                "Schema",
+                str,
+                decimal.Decimal,
+                BoolClass,
+                NoneClass,
+                frozendict.frozendict,
+                tuple,
+            ]
+        ],
+    ]:
         """
         ComposedBase _validate_oapg
         We return dynamic classes of different bases depending upon the inputs
@@ -2063,27 +2449,33 @@ def _validate_oapg(
             ApiTypeError: when the input type is not in the list of allowed spec types
         """
         # validation checking on types, validations, and enums
-        path_to_schemas = super()._validate_oapg(arg, validation_metadata=validation_metadata)
+        path_to_schemas = super()._validate_oapg(
+            arg, validation_metadata=validation_metadata
+        )
 
         updated_vm = ValidationMetadata(
             configuration=validation_metadata.configuration,
             from_server=validation_metadata.from_server,
             path_to_item=validation_metadata.path_to_item,
             seen_classes=validation_metadata.seen_classes | frozenset({cls}),
-            validated_path_to_schemas=validation_metadata.validated_path_to_schemas
+            validated_path_to_schemas=validation_metadata.validated_path_to_schemas,
         )
 
         # process composed schema
         discriminator = None
-        if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'discriminator'):
+        if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "discriminator"):
             discriminator = cls.MetaOapg.discriminator()
         discriminated_cls = None
         if discriminator and arg and isinstance(arg, frozendict.frozendict):
             disc_property_name = list(discriminator.keys())[0]
-            cls._ensure_discriminator_value_present_oapg(disc_property_name, updated_vm, arg)
+            cls._ensure_discriminator_value_present_oapg(
+                disc_property_name, updated_vm, arg
+            )
             # get discriminated_cls by looking at the dict in the current class
             discriminated_cls = cls.get_discriminated_class_oapg(
-                disc_property_name=disc_property_name, disc_payload_value=arg[disc_property_name])
+                disc_property_name=disc_property_name,
+                disc_payload_value=arg[disc_property_name],
+            )
             if discriminated_cls is None:
                 raise ApiValueError(
                     "Invalid discriminator value '{}' was passed in to {}.{} Only the values {} are allowed at {}".format(
@@ -2091,29 +2483,27 @@ def _validate_oapg(
                         cls.__name__,
                         disc_property_name,
                         list(discriminator[disc_property_name].keys()),
-                        updated_vm.path_to_item + (disc_property_name,)
+                        updated_vm.path_to_item + (disc_property_name,),
                     )
                 )
 
-        if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'all_of'):
-            other_path_to_schemas = cls.__get_allof_classes(arg, validation_metadata=updated_vm)
+        if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "all_of"):
+            other_path_to_schemas = cls.__get_allof_classes(
+                arg, validation_metadata=updated_vm
+            )
             update(path_to_schemas, other_path_to_schemas)
-        if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'one_of'):
+        if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "one_of"):
             other_path_to_schemas = cls.__get_oneof_class(
-                arg,
-                discriminated_cls=discriminated_cls,
-                validation_metadata=updated_vm
+                arg, discriminated_cls=discriminated_cls, validation_metadata=updated_vm
             )
             update(path_to_schemas, other_path_to_schemas)
-        if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'any_of'):
+        if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "any_of"):
             other_path_to_schemas = cls.__get_anyof_classes(
-                arg,
-                discriminated_cls=discriminated_cls,
-                validation_metadata=updated_vm
+                arg, discriminated_cls=discriminated_cls, validation_metadata=updated_vm
             )
             update(path_to_schemas, other_path_to_schemas)
         not_cls = None
-        if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'not_schema'):
+        if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "not_schema"):
             not_cls = cls.MetaOapg.not_schema
             not_cls = cls._get_class_oapg(not_cls)
         if not_cls:
@@ -2129,13 +2519,17 @@ def _validate_oapg(
                 raise not_exception
 
             try:
-                other_path_to_schemas = not_cls._validate_oapg(arg, validation_metadata=updated_vm)
+                other_path_to_schemas = not_cls._validate_oapg(
+                    arg, validation_metadata=updated_vm
+                )
             except (ApiValueError, ApiTypeError):
                 pass
             if other_path_to_schemas:
                 raise not_exception
 
-        if discriminated_cls is not None and not updated_vm.validation_ran_earlier(discriminated_cls):
+        if discriminated_cls is not None and not updated_vm.validation_ran_earlier(
+            discriminated_cls
+        ):
             # TODO use an exception from this package here
             assert discriminated_cls in path_to_schemas[updated_vm.path_to_item]
         return path_to_schemas
@@ -2151,60 +2545,73 @@ class ComposedSchema(
     BoolBase,
     NoneBase,
     Schema,
-    NoneFrozenDictTupleStrDecimalBoolMixin
+    NoneFrozenDictTupleStrDecimalBoolMixin,
 ):
     @classmethod
-    def from_openapi_data_oapg(cls, *args: typing.Any, _configuration: typing.Optional[Configuration] = None, **kwargs):
+    def from_openapi_data_oapg(
+        cls,
+        *args: typing.Any,
+        _configuration: typing.Optional[Configuration] = None,
+        **kwargs,
+    ):
         if not args:
             if not kwargs:
-                raise ApiTypeError('{} is missing required input data in args or kwargs'.format(cls.__name__))
-            args = (kwargs, )
+                raise ApiTypeError(
+                    "{} is missing required input data in args or kwargs".format(
+                        cls.__name__
+                    )
+                )
+            args = (kwargs,)
         return super().from_openapi_data_oapg(args[0], _configuration=_configuration)
 
 
-class ListSchema(
-    ListBase,
-    Schema,
-    TupleMixin
-):
+class ListSchema(ListBase, Schema, TupleMixin):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: typing.List[typing.Any], _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls,
+        arg: typing.List[typing.Any],
+        _configuration: typing.Optional[Configuration] = None,
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
-    def __new__(cls, arg: typing.Union[typing.List[typing.Any], typing.Tuple[typing.Any]], **kwargs: Configuration):
+    def __new__(
+        cls,
+        arg: typing.Union[typing.List[typing.Any], typing.Tuple[typing.Any]],
+        **kwargs: Configuration,
+    ):
         return super().__new__(cls, arg, **kwargs)
 
 
-class NoneSchema(
-    NoneBase,
-    Schema,
-    NoneMixin
-):
+class NoneSchema(NoneBase, Schema, NoneMixin):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: None, _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls, arg: None, _configuration: typing.Optional[Configuration] = None
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
     def __new__(cls, arg: None, **kwargs: Configuration):
         return super().__new__(cls, arg, **kwargs)
 
 
-class NumberSchema(
-    NumberBase,
-    Schema,
-    DecimalMixin
-):
+class NumberSchema(NumberBase, Schema, DecimalMixin):
     """
     This is used for type: number with no format
     Both integers AND floats are accepted
     """
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: typing.Union[int, float], _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls,
+        arg: typing.Union[int, float],
+        _configuration: typing.Optional[Configuration] = None,
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
-    def __new__(cls, arg: typing.Union[decimal.Decimal, int, float], **kwargs: Configuration):
+    def __new__(
+        cls, arg: typing.Union[decimal.Decimal, int, float], **kwargs: Configuration
+    ):
         return super().__new__(cls, arg, **kwargs)
 
 
@@ -2218,13 +2625,19 @@ def as_int_oapg(self) -> int:
             return self._as_int
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls,
+        arg: typing.Optional[decimal.Decimal],
+        validation_metadata: ValidationMetadata,
+    ):
         if isinstance(arg, decimal.Decimal):
 
             denominator = arg.as_integer_ratio()[-1]
             if denominator != 1:
                 raise ApiValueError(
-                    "Invalid value '{}' for type integer at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type integer at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -2244,7 +2657,9 @@ def _validate_oapg(
 class IntSchema(IntBase, NumberSchema):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: int, _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls, arg: int, _configuration: typing.Optional[Configuration] = None
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
     def __new__(cls, arg: typing.Union[decimal.Decimal, int], **kwargs: Configuration):
@@ -2256,11 +2671,17 @@ class Int32Base:
     __inclusive_maximum = decimal.Decimal(2147483647)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls,
+        arg: typing.Optional[decimal.Decimal],
+        validation_metadata: ValidationMetadata,
+    ):
         if isinstance(arg, decimal.Decimal) and arg.as_tuple().exponent == 0:
             if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum:
                 raise ApiValueError(
-                    "Invalid value '{}' for type int32 at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type int32 at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -2276,10 +2697,7 @@ def _validate_oapg(
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
 
-class Int32Schema(
-    Int32Base,
-    IntSchema
-):
+class Int32Schema(Int32Base, IntSchema):
     pass
 
 
@@ -2288,11 +2706,17 @@ class Int64Base:
     __inclusive_maximum = decimal.Decimal(9223372036854775807)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls,
+        arg: typing.Optional[decimal.Decimal],
+        validation_metadata: ValidationMetadata,
+    ):
         if isinstance(arg, decimal.Decimal) and arg.as_tuple().exponent == 0:
             if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum:
                 raise ApiValueError(
-                    "Invalid value '{}' for type int64 at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type int64 at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -2308,23 +2732,26 @@ def _validate_oapg(
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
 
-class Int64Schema(
-    Int64Base,
-    IntSchema
-):
+class Int64Schema(Int64Base, IntSchema):
     pass
 
 
 class Float32Base:
-    __inclusive_minimum = decimal.Decimal(-3.4028234663852886e+38)
-    __inclusive_maximum = decimal.Decimal(3.4028234663852886e+38)
+    __inclusive_minimum = decimal.Decimal(-3.4028234663852886e38)
+    __inclusive_maximum = decimal.Decimal(3.4028234663852886e38)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls,
+        arg: typing.Optional[decimal.Decimal],
+        validation_metadata: ValidationMetadata,
+    ):
         if isinstance(arg, decimal.Decimal):
             if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum:
                 raise ApiValueError(
-                    "Invalid value '{}' for type float at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type float at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -2340,26 +2767,31 @@ def _validate_oapg(
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
 
-class Float32Schema(
-    Float32Base,
-    NumberSchema
-):
+class Float32Schema(Float32Base, NumberSchema):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls, arg: float, _configuration: typing.Optional[Configuration] = None
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
 
 class Float64Base:
-    __inclusive_minimum = decimal.Decimal(-1.7976931348623157E+308)
-    __inclusive_maximum = decimal.Decimal(1.7976931348623157E+308)
+    __inclusive_minimum = decimal.Decimal(-1.7976931348623157e308)
+    __inclusive_maximum = decimal.Decimal(1.7976931348623157e308)
 
     @classmethod
-    def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata):
+    def __validate_format(
+        cls,
+        arg: typing.Optional[decimal.Decimal],
+        validation_metadata: ValidationMetadata,
+    ):
         if isinstance(arg, decimal.Decimal):
             if not cls.__inclusive_minimum <= arg <= cls.__inclusive_maximum:
                 raise ApiValueError(
-                    "Invalid value '{}' for type double at {}".format(arg, validation_metadata.path_to_item)
+                    "Invalid value '{}' for type double at {}".format(
+                        arg, validation_metadata.path_to_item
+                    )
                 )
 
     @classmethod
@@ -2375,22 +2807,17 @@ def _validate_oapg(
         return super()._validate_oapg(arg, validation_metadata=validation_metadata)
 
 
-class Float64Schema(
-    Float64Base,
-    NumberSchema
-):
+class Float64Schema(Float64Base, NumberSchema):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls, arg: float, _configuration: typing.Optional[Configuration] = None
+    ):
         # todo check format
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
 
-class StrSchema(
-    StrBase,
-    Schema,
-    StrMixin
-):
+class StrSchema(StrBase, Schema, StrMixin):
     """
     date + datetime string types must inherit from this class
     That is because one can validate a str payload as both:
@@ -2399,10 +2826,14 @@ class StrSchema(
     """
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: str, _configuration: typing.Optional[Configuration] = None) -> 'StrSchema':
+    def from_openapi_data_oapg(
+        cls, arg: str, _configuration: typing.Optional[Configuration] = None
+    ) -> "StrSchema":
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
-    def __new__(cls, arg: typing.Union[str, date, datetime, uuid.UUID], **kwargs: Configuration):
+    def __new__(
+        cls, arg: typing.Union[str, date, datetime, uuid.UUID], **kwargs: Configuration
+    ):
         return super().__new__(cls, arg, **kwargs)
 
 
@@ -2438,21 +2869,16 @@ def __new__(cls, arg: str, **kwargs: Configuration):
         return super().__new__(cls, arg, **kwargs)
 
 
-class BytesSchema(
-    Schema,
-    BytesMixin
-):
+class BytesSchema(Schema, BytesMixin):
     """
     this class will subclass bytes and is immutable
     """
+
     def __new__(cls, arg: bytes, **kwargs: Configuration):
         return super(Schema, cls).__new__(cls, arg)
 
 
-class FileSchema(
-    Schema,
-    FileMixin
-):
+class FileSchema(Schema, FileMixin):
     """
     This class is NOT immutable
     Dynamic classes are built using it for example when AnyType allows in binary data
@@ -2470,7 +2896,9 @@ class FileSchema(
     - to be able to preserve file name info
     """
 
-    def __new__(cls, arg: typing.Union[io.FileIO, io.BufferedReader], **kwargs: Configuration):
+    def __new__(
+        cls, arg: typing.Union[io.FileIO, io.BufferedReader], **kwargs: Configuration
+    ):
         return super(Schema, cls).__new__(cls, arg)
 
 
@@ -2478,12 +2906,7 @@ class BinaryBase:
     pass
 
 
-class BinarySchema(
-    ComposedBase,
-    BinaryBase,
-    Schema,
-    BinaryMixin
-):
+class BinarySchema(ComposedBase, BinaryBase, Schema, BinaryMixin):
     class MetaOapg:
         @staticmethod
         def one_of():
@@ -2492,18 +2915,20 @@ def one_of():
                 FileSchema,
             ]
 
-    def __new__(cls, arg: typing.Union[io.FileIO, io.BufferedReader, bytes], **kwargs: Configuration):
+    def __new__(
+        cls,
+        arg: typing.Union[io.FileIO, io.BufferedReader, bytes],
+        **kwargs: Configuration,
+    ):
         return super().__new__(cls, arg)
 
 
-class BoolSchema(
-    BoolBase,
-    Schema,
-    BoolMixin
-):
+class BoolSchema(BoolBase, Schema, BoolMixin):
 
     @classmethod
-    def from_openapi_data_oapg(cls, arg: bool, _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls, arg: bool, _configuration: typing.Optional[Configuration] = None
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
     def __new__(cls, arg: bool, **kwargs: ValidationMetadata):
@@ -2518,7 +2943,7 @@ class AnyTypeSchema(
     BoolBase,
     NoneBase,
     Schema,
-    NoneFrozenDictTupleStrDecimalBoolFileBytesMixin
+    NoneFrozenDictTupleStrDecimalBoolFileBytesMixin,
 ):
     # Python representation of a schema defined as true or {}
     pass
@@ -2545,7 +2970,7 @@ def __new__(
         cls,
         *args,
         _configuration: typing.Optional[Configuration] = None,
-    ) -> 'NotAnyTypeSchema':
+    ) -> "NotAnyTypeSchema":
         return super().__new__(
             cls,
             *args,
@@ -2553,26 +2978,55 @@ def __new__(
         )
 
 
-class DictSchema(
-    DictBase,
-    Schema,
-    FrozenDictMixin
-):
+class DictSchema(DictBase, Schema, FrozenDictMixin):
     @classmethod
-    def from_openapi_data_oapg(cls, arg: typing.Dict[str, typing.Any], _configuration: typing.Optional[Configuration] = None):
+    def from_openapi_data_oapg(
+        cls,
+        arg: typing.Dict[str, typing.Any],
+        _configuration: typing.Optional[Configuration] = None,
+    ):
         return super().from_openapi_data_oapg(arg, _configuration=_configuration)
 
-    def __new__(cls, *args: typing.Union[dict, frozendict.frozendict], **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, bytes, Schema, Unset, ValidationMetadata]):
+    def __new__(
+        cls,
+        *args: typing.Union[dict, frozendict.frozendict],
+        **kwargs: typing.Union[
+            dict,
+            frozendict.frozendict,
+            list,
+            tuple,
+            decimal.Decimal,
+            float,
+            int,
+            str,
+            date,
+            datetime,
+            bool,
+            None,
+            bytes,
+            Schema,
+            Unset,
+            ValidationMetadata,
+        ],
+    ):
         return super().__new__(cls, *args, **kwargs)
 
 
-schema_type_classes = {NoneSchema, DictSchema, ListSchema, NumberSchema, StrSchema, BoolSchema, AnyTypeSchema}
+schema_type_classes = {
+    NoneSchema,
+    DictSchema,
+    ListSchema,
+    NumberSchema,
+    StrSchema,
+    BoolSchema,
+    AnyTypeSchema,
+}
 
 
 @functools.lru_cache()
 def get_new_class(
     class_name: str,
-    bases: typing.Tuple[typing.Type[typing.Union[Schema, typing.Any]], ...]
+    bases: typing.Tuple[typing.Type[typing.Union[Schema, typing.Any]], ...],
 ) -> typing.Type[Schema]:
     """
     Returns a new class that is made with the subclass bases
diff --git a/domino/agents/_eval_tags.py b/domino/agents/_eval_tags.py
index 4bc9283a..e42b49d7 100644
--- a/domino/agents/_eval_tags.py
+++ b/domino/agents/_eval_tags.py
@@ -1,8 +1,8 @@
 import re
 from typing import Optional
 
-from ._constants import EVALUATION_TAG_PREFIX
 from ..exceptions import DominoException
+from ._constants import EVALUATION_TAG_PREFIX
 
 VALID_LABEL_PATTERN = r"[a-zA-Z0-9_-]+"
 TAG_MATCHER_PATTERN = (
diff --git a/domino/agents/_verify_domino_support.py b/domino/agents/_verify_domino_support.py
index b7badae9..f5349fee 100644
--- a/domino/agents/_verify_domino_support.py
+++ b/domino/agents/_verify_domino_support.py
@@ -1,13 +1,15 @@
 import logging
-import mlflow
 import os
-import semver
 from urllib.parse import urljoin
 
+import mlflow
+import semver
+
 from domino.exceptions import UnsupportedOperationException
+
 from ..authentication import get_auth_by_type
-from ._constants import MIN_MLFLOW_VERSION, MIN_DOMINO_VERSION
 from ..http_request_manager import _HttpRequestManager
+from ._constants import MIN_DOMINO_VERSION, MIN_MLFLOW_VERSION
 
 # not thread safe. I am not sure if this will be a problem for users, so am not implementing locking
 # it is ok if multiple requests are sent to verify Domino version support
diff --git a/domino/agents/logging/__init__.py b/domino/agents/logging/__init__.py
index 500be055..a53c8c8d 100644
--- a/domino/agents/logging/__init__.py
+++ b/domino/agents/logging/__init__.py
@@ -1,5 +1,5 @@
+from .dominorun import DominoAgentContext, DominoRun, SummaryStatistic
 from .logging import log_evaluation
-from .dominorun import DominoRun, DominoAgentContext, SummaryStatistic
 
 __all__ = [
     "DominoRun",
diff --git a/domino/agents/logging/dominorun.py b/domino/agents/logging/dominorun.py
index b61af1a9..1482841a 100644
--- a/domino/agents/logging/dominorun.py
+++ b/domino/agents/logging/dominorun.py
@@ -1,14 +1,19 @@
 import itertools
 import logging
-import mlflow
 import re
-from statistics import median, stdev
 import traceback
-from typing import Literal, Optional, Callable
+from statistics import median, stdev
+from typing import Callable, Literal, Optional
+
+import mlflow
 
 from .._client import client
-from .._constants import LARGEST_MAX_RESULTS_PAGE_SIZE, DOMINO_INTERNAL_EVAL_TAG, AGENT_RUN_TAG
-from .._eval_tags import build_metric_tag, VALID_LABEL_PATTERN
+from .._constants import (
+    AGENT_RUN_TAG,
+    DOMINO_INTERNAL_EVAL_TAG,
+    LARGEST_MAX_RESULTS_PAGE_SIZE,
+)
+from .._eval_tags import VALID_LABEL_PATTERN, build_metric_tag
 from .._verify_domino_support import verify_domino_support
 from ..read_agent_config import get_flattened_agent_config
 
@@ -333,6 +338,7 @@ class DominoAgentContext(DominoRun):
 
     Returns: DominoAgentContext context manager
     """
+
     _is_agent_context = True
 
 
diff --git a/domino/agents/logging/logging.py b/domino/agents/logging/logging.py
index 4738635d..ce872da5 100644
--- a/domino/agents/logging/logging.py
+++ b/domino/agents/logging/logging.py
@@ -1,9 +1,9 @@
 import json
 
 from .._client import client
+from .._constants import DOMINO_INTERNAL_EVAL_TAG
 from .._eval_tags import build_eval_result_tag, validate_label
 from .._verify_domino_support import verify_domino_support
-from .._constants import DOMINO_INTERNAL_EVAL_TAG
 
 
 def add_domino_tags(trace_id: str):
@@ -16,10 +16,10 @@ def add_domino_tags(trace_id: str):
 
 
 def log_evaluation(
-            trace_id: str,
-            name: str,
-            value: float | str,
-        ):
+    trace_id: str,
+    name: str,
+    value: float | str,
+):
     """This logs evaluation data and metadata to a parent trace. This is used to log the evaluation of a span
     after it was created. This is useful for analyzing past performance of an Agent component.
 
diff --git a/domino/agents/read_agent_config.py b/domino/agents/read_agent_config.py
index 2fbb2915..1e11ca17 100644
--- a/domino/agents/read_agent_config.py
+++ b/domino/agents/read_agent_config.py
@@ -1,6 +1,7 @@
 import logging
 import os
 from typing import Optional
+
 import yaml
 
 
@@ -40,7 +41,7 @@ def read_agent_config(path: Optional[str] = None) -> dict:
     path = path or _get_agent_config_path()
     params = {}
     try:
-        with open(path, 'r') as f:
+        with open(path, "r") as f:
             params = yaml.safe_load(f)
     except Exception as e:
         logging.warning(f"Failed to read agent config yaml at path {path}: {e}")
diff --git a/domino/agents/tracing/__init__.py b/domino/agents/tracing/__init__.py
index c5b93399..00a15ee3 100644
--- a/domino/agents/tracing/__init__.py
+++ b/domino/agents/tracing/__init__.py
@@ -1,13 +1,13 @@
+from .inittracing import init_tracing as init_tracing
 from .tracing import (
-    add_tracing,
-    SpanSummary,
     EvaluationResult,
-    TraceSummary,
     SearchTracesResponse,
-    search_traces,
+    SpanSummary,
+    TraceSummary,
+    add_tracing,
     search_agent_traces,
+    search_traces,
 )
-from .inittracing import init_tracing as init_tracing
 
 __all__ = [
     "add_tracing",
diff --git a/domino/agents/tracing/inittracing.py b/domino/agents/tracing/inittracing.py
index 39737b33..b229afda 100644
--- a/domino/agents/tracing/inittracing.py
+++ b/domino/agents/tracing/inittracing.py
@@ -1,12 +1,13 @@
 import logging
-import mlflow
 import threading
 from typing import Optional
 
+import mlflow
+
 from .._client import client
 from .._constants import EXPERIMENT_AGENT_TAG
-from ._util import is_agent, get_running_agent_experiment_name
 from .._verify_domino_support import verify_domino_support
+from ._util import get_running_agent_experiment_name, is_agent
 
 # init_tracing is not thread safe. this likely won't cause an issue with the autolog frameworks. If data inconsistency is caused with
 # autolog frameworks, then the worst case scenario is that we get duplicate autolog calls. These are local to the process
@@ -85,7 +86,6 @@ def init_tracing(autolog_frameworks: Optional[list[str]] = None):
                 _is_prod_tracing_initialized = True
 
     for fw in frameworks:
-        global triggered_autolog_frameworks
         if fw not in triggered_autolog_frameworks:
             triggered_autolog_frameworks.add(fw)
             call_autolog(fw)
diff --git a/domino/agents/tracing/tracing.py b/domino/agents/tracing/tracing.py
index a3c5faba..771b9ff6 100644
--- a/domino/agents/tracing/tracing.py
+++ b/domino/agents/tracing/tracing.py
@@ -1,19 +1,20 @@
-from dataclasses import dataclass
-from datetime import datetime
 import functools
 import inspect
 import logging
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Any, Callable, Optional
+from uuid import uuid4
+
 import mlflow
 from mlflow.entities import SpanType
-from typing import Optional, Callable, Any
-from uuid import uuid4
 
 from .._client import client
-from .inittracing import init_tracing
-from ..logging.logging import log_evaluation
-from ._util import get_is_production, build_agent_experiment_name
-from .._eval_tags import validate_label, get_eval_tag_name
+from .._eval_tags import get_eval_tag_name, validate_label
 from .._verify_domino_support import verify_domino_support
+from ..logging.logging import log_evaluation
+from ._util import build_agent_experiment_name, get_is_production
+from .inittracing import init_tracing
 
 EvalResult = dict[str, int | float | str]
 
@@ -256,7 +257,9 @@ def wrapper(*args, **kwargs):
             result = DOMINO_NO_RESULT_ADD_TRACING
             init_tracing(autolog_frameworks)
 
-            with mlflow.start_span(name, span_type=span_type, attributes=attributes) as parent_span:
+            with mlflow.start_span(
+                name, span_type=span_type, attributes=attributes
+            ) as parent_span:
                 _set_span_inputs(parent_span, func, args, kwargs)
 
                 result = func(*args, **kwargs)
@@ -276,7 +279,9 @@ async def async_wrapper(*args, **kwargs):
                 result = DOMINO_NO_RESULT_ADD_TRACING
                 init_tracing(autolog_frameworks)
 
-                with mlflow.start_span(name, span_type=span_type, attributes=attributes) as parent_span:
+                with mlflow.start_span(
+                    name, span_type=span_type, attributes=attributes
+                ) as parent_span:
                     _set_span_inputs(parent_span, func, args, kwargs)
 
                     result = await func(*args, **kwargs)
@@ -294,11 +299,13 @@ async def async_wrapper(*args, **kwargs):
         if inspect.isgeneratorfunction(func):
 
             @functools.wraps(func)
-            def wrapper(*args, **kwargs):
+            def gen_wrapper(*args, **kwargs):
                 result = DOMINO_NO_RESULT_ADD_TRACING
                 init_tracing(autolog_frameworks)
 
-                with mlflow.start_span(name, span_type=span_type, attributes=attributes) as parent_span:
+                with mlflow.start_span(
+                    name, span_type=span_type, attributes=attributes
+                ) as parent_span:
                     inputs = _set_span_inputs(parent_span, func, args, kwargs)
 
                     result = func(*args, **kwargs)
@@ -315,7 +322,9 @@ def wrapper(*args, **kwargs):
                         i = -1
                         for v in result:
                             i += 1
-                            with mlflow.start_span(name, span_type=span_type, attributes=attributes) as gen_span:
+                            with mlflow.start_span(
+                                name, span_type=span_type, attributes=attributes
+                            ) as gen_span:
                                 # make span for each yielded value
                                 gen_span.set_inputs(inputs)
                                 gen_span.set_attributes(
@@ -330,10 +339,13 @@ def wrapper(*args, **kwargs):
                         for v in all_results:
                             yield v
 
-                if eagerly_evaluate_streamed_results and result != DOMINO_NO_RESULT_ADD_TRACING:
+                if (
+                    eagerly_evaluate_streamed_results
+                    and result != DOMINO_NO_RESULT_ADD_TRACING
+                ):
                     _log_eval_results(parent_span, evaluator, trace_evaluator)
 
-            return wrapper
+            return gen_wrapper
 
         return wrapper
 
@@ -469,9 +481,7 @@ def _search_traces(
         )
 
     if agent_version and not agent_id:
-        raise Exception(
-            "If agent_version is provided, agent_id must also be provided"
-        )
+        raise Exception("If agent_version is provided, agent_id must also be provided")
 
     filter_clauses = []
 
diff --git a/domino/airflow/_operator.py b/domino/airflow/_operator.py
index 2fabd5bd..7643f535 100644
--- a/domino/airflow/_operator.py
+++ b/domino/airflow/_operator.py
@@ -47,7 +47,7 @@ def __init__(
         startup_delay: Optional[int] = 10,
         include_setup_log: Optional[bool] = True,
         *args,
-        **kwargs
+        **kwargs,
     ):
         super(DominoOperator, self).__init__(*args, **kwargs)
 
@@ -197,7 +197,7 @@ def __init__(
         on_demand_spark_cluster_properties: Optional[dict] = None,
         compute_cluster_properties: Optional[dict] = None,
         *args,
-        **kwargs
+        **kwargs,
     ):
         super(DominoSparkOperator, self).__init__(*args, **kwargs)
 
diff --git a/domino/authentication.py b/domino/authentication.py
index 376c79c0..8eb9f6b9 100644
--- a/domino/authentication.py
+++ b/domino/authentication.py
@@ -3,8 +3,11 @@
 
 from requests.auth import AuthBase
 
-from .constants import DOMINO_TOKEN_FILE_KEY_NAME, DOMINO_USER_API_KEY_KEY_NAME, \
-    DOMINO_API_PROXY
+from .constants import (
+    DOMINO_API_PROXY,
+    DOMINO_TOKEN_FILE_KEY_NAME,
+    DOMINO_USER_API_KEY_KEY_NAME,
+)
 
 
 class ProxyAuth(AuthBase):
@@ -16,7 +19,10 @@ class ProxyAuth(AuthBase):
     def __init__(self, api_proxy):
         match = re.search("(https?://)?([^/]+:[0-9]+)$", api_proxy)
         if not match:
-            raise RuntimeError("Bad proxy URL: '%s', must be host:port or scheme://host:port" % api_proxy)
+            raise RuntimeError(
+                "Bad proxy URL: '%s', must be host:port or scheme://host:port"
+                % api_proxy
+            )
         if not match.group(1):
             proxy_str = "http://" + match.group(2)
         else:
@@ -35,7 +41,7 @@ def __call__(self, r):
         return r
 
     def _replaceHostWithProxy(self, url):
-        return re.sub('^.*?://[^/]+', self.api_proxy, url)
+        return re.sub("^.*?://[^/]+", self.api_proxy, url)
 
 
 class ApiKeyAuth(AuthBase):
@@ -78,7 +84,9 @@ def __call__(self, r):
         return r
 
 
-def get_auth_by_type(api_key=None, auth_token=None, domino_token_file=None, api_proxy=None):
+def get_auth_by_type(
+    api_key=None, auth_token=None, domino_token_file=None, api_proxy=None
+):
     """
     Return appropriate authentication object for requests.
 
@@ -110,7 +118,9 @@ def get_auth_by_type(api_key=None, auth_token=None, domino_token_file=None, api_
         domino_token_file_from_env = os.getenv(DOMINO_TOKEN_FILE_KEY_NAME)
         if api_key_from_env or domino_token_file_from_env or api_proxy_from_env:
             return get_auth_by_type(
-                api_key=api_key_from_env, domino_token_file=domino_token_file_from_env, api_proxy=api_proxy_from_env
+                api_key=api_key_from_env,
+                domino_token_file=domino_token_file_from_env,
+                api_proxy=api_proxy_from_env,
             )
         else:
             # All attempts failed -- nothing to do but raise an error.
diff --git a/domino/constants.py b/domino/constants.py
index 442dbfb9..cf5b71a6 100644
--- a/domino/constants.py
+++ b/domino/constants.py
@@ -1,6 +1,7 @@
 """
 Minimum Domino version supported by this python-domino library
 """
+
 MINIMUM_SUPPORTED_DOMINO_VERSION = "4.1.0"
 
 """
diff --git a/domino/datasets.py b/domino/datasets.py
index 52fe6542..0ed6a115 100644
--- a/domino/datasets.py
+++ b/domino/datasets.py
@@ -15,14 +15,15 @@
 FILE_UPLOAD_SETTING_DEFAULT = "Ignore"
 MAX_WORKERS = 10
 MAX_UPLOAD_ATTEMPTS = 10
-MB = 2 ** 20  # 2^20 bytes - 1 Megabyte
+MB = 2**20  # 2^20 bytes - 1 Megabyte
 SLEEP_TIME_IN_SEC = 3
 UPLOAD_READ_TIMEOUT_IN_SEC = 30
 
 
 @dataclass
 class UploadChunk:
-    """ Class for keeping track of a dataset upload chunk."""
+    """Class for keeping track of a dataset upload chunk."""
+
     absolute_path: str
     chunk_number: int
     dataset_id: str
@@ -45,15 +46,18 @@ def __init__(
         request_manager: _HttpRequestManager,
         routes: _Routes,
         target_relative_path: str,
-
         file_upload_setting: str,
         max_workers: int,
         target_chunk_size: int,
-        interrupted: bool = False
+        interrupted: bool = False,
     ):
-        cleaned_relative_local_path = os.path.relpath(os.path.normpath(local_path_to_file_or_directory), start=os.curdir)
+        cleaned_relative_local_path = os.path.relpath(
+            os.path.normpath(local_path_to_file_or_directory), start=os.curdir
+        )
         # in case running on windows
-        cleaned_relative_local_path = self._get_unix_style_path(cleaned_relative_local_path)
+        cleaned_relative_local_path = self._get_unix_style_path(
+            cleaned_relative_local_path
+        )
 
         self.csrf_no_check_header = csrf_no_check_header
         self.dataset_id = dataset_id
@@ -72,44 +76,56 @@ def __enter__(self):
         # creating upload session
         start_upload_body = {
             "filePaths": [],
-            "fileCollisionSetting": self.file_upload_setting
+            "fileCollisionSetting": self.file_upload_setting,
         }
         start_upload_url = self.routes.datasets_start_upload(self.dataset_id)
-        self.upload_key = self.request_manager.post(start_upload_url, json=start_upload_body).json()
+        self.upload_key = self.request_manager.post(
+            start_upload_url, json=start_upload_body
+        ).json()
         if not self.upload_key:
-            raise RuntimeError(f"upload key for {self.dataset_id} not found. Session could not start.")
+            raise RuntimeError(
+                f"upload key for {self.dataset_id} not found. Session could not start."
+            )
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         # catching errors
         if exc_type is not None:
-            self.log.error(f"Upload for dataset {self.dataset_id} and file or directory "
-                           f"`{self.local_path_file_or_directory}` failed, attempting to cancel session. "
-                           f"Please try again.")
+            self.log.error(
+                f"Upload for dataset {self.dataset_id} and file or directory "
+                f"`{self.local_path_file_or_directory}` failed, attempting to cancel session. "
+                f"Please try again."
+            )
             self.log.error(f"Error type: {exc_val}. Error message: {exc_tb}.")
             if not isinstance(exc_type, ValueError):
                self._cancel_upload_session()  # if it is a ValueError, canceling session would fail
             return False
         # ending snapshot upload
         try:
-            url = self.routes.datasets_end_upload(self.dataset_id, self.upload_key, self.target_relative_path)
+            url = self.routes.datasets_end_upload(
+                self.dataset_id, self.upload_key, self.target_relative_path
+            )
             self.request_manager.get(url)
             self.log.info("Upload session ended successfully.")
             return True
         except Exception:
-            self.log.error("Ending snapshot upload failed. See error for details. Attempting to cancel "
-                           "upload session.")
+            self.log.error(
+                "Ending snapshot upload failed. See error for details. Attempting to cancel "
+                "upload session."
+            )
             self._cancel_upload_session()
             return False
 
     def upload(self):
         try:
             if not self.upload_key:
-                raise RuntimeError(f"upload key for {self.dataset_id} not found. Please start session before uploading.")
+                raise RuntimeError(
+                    f"upload key for {self.dataset_id} not found. Please start session before uploading."
+                )
             q = self._create_chunk_queue()
             with ThreadPoolExecutor(self.max_workers) as executor:
                 # list ensures all the threads are complete before returning results
-                results = list(executor.map(self._upload_chunk, q))
+                list(executor.map(self._upload_chunk, q))
             return self.local_path_file_or_directory
         except KeyboardInterrupt:
             self.interrupted = True  # this will allow the threads to stop properly
@@ -122,7 +138,9 @@ def _cancel_upload_session(self):
 
     def _create_chunk_queue(self) -> list[UploadChunk]:
         if not os.path.exists(self.local_path_file_or_directory):
-            raise ValueError(f"local file or directory {self.local_path_file_or_directory} does not exist.")
+            raise ValueError(
+                f"local file or directory {self.local_path_file_or_directory} does not exist."
+            )
         if os.path.isfile(self.local_path_file_or_directory):
             return self._create_chunks(self.local_path_file_or_directory)
         chunk_q = []
@@ -142,19 +160,28 @@ def _create_chunks(self, local_path_file, starting_index=1) -> list[UploadChunk]
         file_size = os.path.getsize(local_path_file)
         file_name = os.path.basename(local_path_file)
         total_chunks = max(int(math.ceil(float(file_size) / self.target_chunk_size)), 1)
-        return [UploadChunk(absolute_path=os.path.abspath(local_path_file), chunk_number=chunk_num,
-                            dataset_id=self.dataset_id, file_name=file_name, file_size=file_size,
-                            identifier=f"{file_size}-{file_name}", relative_path=local_path_file,
-                            target_chunk_size=self.target_chunk_size, total_chunks=total_chunks,
-                            upload_key=self.upload_key)
-                for chunk_num in range(starting_index, total_chunks + 1)]
+        return [
+            UploadChunk(
+                absolute_path=os.path.abspath(local_path_file),
+                chunk_number=chunk_num,
+                dataset_id=self.dataset_id,
+                file_name=file_name,
+                file_size=file_size,
+                identifier=f"{file_size}-{file_name}",
+                relative_path=local_path_file,
+                target_chunk_size=self.target_chunk_size,
+                total_chunks=total_chunks,
+                upload_key=self.upload_key,
+            )
+            for chunk_num in range(starting_index, total_chunks + 1)
+        ]
 
     def _upload_chunk(self, chunk: UploadChunk) -> None:
         if self.interrupted:
             return
         # read the file chunk
         starting_skip = chunk.target_chunk_size * (chunk.chunk_number - 1)
-        with open(chunk.absolute_path, 'rb') as file:
+        with open(chunk.absolute_path, "rb") as file:
             file.seek(starting_skip)
             chunk_data = file.read(chunk.target_chunk_size)
 
@@ -165,7 +192,9 @@ def _upload_chunk(self, chunk: UploadChunk) -> None:
         if should_upload:
             self._upload_chunk_retry(checksum, chunk, chunk_data)
         else:
-            self.log.info(f"Skipping chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name}")
+            self.log.info(
+                f"Skipping chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name}"
+            )
 
     @retry(tries=MAX_UPLOAD_ATTEMPTS, delay=SLEEP_TIME_IN_SEC, backoff=2)
     def _upload_chunk_retry(self, checksum: str, chunk: UploadChunk, chunk_data):
@@ -173,25 +202,43 @@ def _upload_chunk_retry(self, checksum: str, chunk: UploadChunk, chunk_data):
             return
         actual_chunk_size = len(chunk_data)
         # uploading chunk
-        self.log.info(f"Uploading chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name}")
-        upload_chunk_url = self.routes.datasets_upload_chunk(chunk.dataset_id, chunk.upload_key,
-                                                             chunk.chunk_number, chunk.total_chunks,
-                                                             chunk.target_chunk_size, actual_chunk_size,
-                                                             chunk.identifier, chunk.relative_path,
-                                                             checksum)
+        self.log.info(
+            f"Uploading chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name}"
+        )
+        upload_chunk_url = self.routes.datasets_upload_chunk(
+            chunk.dataset_id,
+            chunk.upload_key,
+            chunk.chunk_number,
+            chunk.total_chunks,
+            chunk.target_chunk_size,
+            actual_chunk_size,
+            chunk.identifier,
+            chunk.relative_path,
+            checksum,
+        )
         # files to pass in post's **kwargs
         files = {
-            chunk.relative_path: (chunk.file_name, chunk_data, 'application/octet-stream')
+            chunk.relative_path: (
+                chunk.file_name,
+                chunk_data,
+                "application/octet-stream",
+            )
         }
         start_time_ns = time.time_ns()
         # making call to upload
-        self.request_manager.post(upload_chunk_url, files=files, timeout=UPLOAD_READ_TIMEOUT_IN_SEC,
-                                  headers=self.csrf_no_check_header)
+        self.request_manager.post(
+            upload_chunk_url,
+            files=files,
+            timeout=UPLOAD_READ_TIMEOUT_IN_SEC,
+            headers=self.csrf_no_check_header,
+        )
         end_time_ns = time.time_ns()
         duration_ns = end_time_ns - start_time_ns
         bandwidth_bytes_per_second = actual_chunk_size / duration_ns * 1000000000.0
-        self.log.info(f"Uploaded chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name} "
-                      f"in {duration_ns / 1_000_000:.1f}ms ({bandwidth_bytes_per_second:.1f} B/s)")
+        self.log.info(
+            f"Uploaded chunk {chunk.chunk_number} of {chunk.total_chunks} for {chunk.file_name} "
+            f"in {duration_ns / 1_000_000:.1f}ms ({bandwidth_bytes_per_second:.1f} B/s)"
+        )
 
     def _test_chunk(self, chunk: UploadChunk, chunk_data: AnyStr) -> (bool, int):
         # computing the MD5 checksum
@@ -200,13 +247,22 @@ def _test_chunk(self, chunk: UploadChunk, chunk_data: AnyStr) -> (bool, int):
 
         chunk_checksum = digest.hexdigest().upper()
         # testing chunk
-        test_chunk_url = self.routes.datasets_test_chunk(chunk.dataset_id, chunk.upload_key, chunk.chunk_number,
-                                                         chunk.total_chunks, chunk.identifier, chunk_checksum)
+        test_chunk_url = self.routes.datasets_test_chunk(
+            chunk.dataset_id,
+            chunk.upload_key,
+            chunk.chunk_number,
+            chunk.total_chunks,
+            chunk.identifier,
+            chunk_checksum,
+        )
         # test chunk returns no content if it should upload
-        return self.request_manager.get(test_chunk_url).status_code == 204, chunk_checksum
+        return (
+            self.request_manager.get(test_chunk_url).status_code == 204,
+            chunk_checksum,
+        )
 
     def _get_unix_style_path(self, path: str) -> str:
         # when running on Windows, converts path to Unix-style path, which the upload chunk API expects
-        if os.sep != '/':
-            path = path.replace(os.sep, '/')
+        if os.sep != "/":
+            path = path.replace(os.sep, "/")
         return path
diff --git a/domino/domino.py b/domino/domino.py
index ce219934..1a134458 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -2,20 +2,24 @@
 import json
 import logging
 import os
-from packaging import version
 import re
 import time
-from typing import Any, Dict, List, Optional, Tuple
 import warnings
+from typing import Any, Dict, List, Optional, Tuple
 
 import polling2
 import requests
 from bs4 import BeautifulSoup
+from packaging import version
 
-from domino import exceptions, helpers, datasets
+from domino import datasets, exceptions, helpers
+from domino._custom_metrics import (
+    _CustomMetricsClient,
+    _CustomMetricsClientBase,
+    _CustomMetricsClientGen,
+)
 from domino._version import __version__
 from domino.authentication import get_auth_by_type
-from domino.domino_enums import BillingTagSettingMode, BudgetLabel, BudgetType, ProjectVisibility
 from domino.constants import (
     CLUSTER_TYPE_MIN_SUPPORT,
     DOMINO_HOST_KEY_NAME,
@@ -24,14 +28,25 @@
     MINIMUM_ON_DEMAND_SPARK_CLUSTER_SUPPORT_DOMINO_VERSION,
     MINIMUM_SUPPORTED_DOMINO_VERSION,
 )
+from domino.domino_enums import (
+    BillingTagSettingMode,
+    BudgetLabel,
+    BudgetType,
+    ProjectVisibility,
+)
 from domino.http_request_manager import _HttpRequestManager
 from domino.routes import _Routes
-from domino._custom_metrics import _CustomMetricsClientBase, _CustomMetricsClientGen, _CustomMetricsClient
 
 
 class Domino:
     def __init__(
-        self, project, api_key=None, host=None, domino_token_file=None, auth_token=None, api_proxy=None,
+        self,
+        project,
+        api_key=None,
+        host=None,
+        domino_token_file=None,
+        auth_token=None,
+        api_proxy=None,
     ):
 
         self._configure_logging()
@@ -87,7 +102,9 @@ def _configure_logging(self):
         logging.basicConfig(level=logging_level)
         self._logger = logging.getLogger(__name__)
 
-    def authenticate(self, api_key=None, auth_token=None, domino_token_file=None, api_proxy=None):
+    def authenticate(
+        self, api_key=None, auth_token=None, domino_token_file=None, api_proxy=None
+    ):
         """
         Method to authenticate the request manager. An existing domino client object can
         use this with a new token if the existing credentials expire.
@@ -120,13 +137,25 @@ def runs_start(
         **kwargs,
     ):
         if "isDirect" in kwargs:
-            warnings.warn("isDirect is deprecated, use is_direct", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "isDirect is deprecated, use is_direct",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             is_direct = kwargs.pop("isDirect")
         if "commitId" in kwargs:
-            warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "commitId is deprecated, use commit_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             commit_id = kwargs.pop("commitId")
         if "publishApiEndpoint" in kwargs:
-            warnings.warn("publishApiEndpoint is deprecated, use publish_api_endpoint", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "publishApiEndpoint is deprecated, use publish_api_endpoint",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             publish_api_endpoint = kwargs.pop("publishApiEndpoint")
 
         url = self._routes.runs_start()
@@ -207,13 +236,25 @@ def runs_start_blocking(
                         threshold exceeds, an exception is raised.
         """
         if "isDirect" in kwargs:
-            warnings.warn("isDirect is deprecated, use is_direct", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "isDirect is deprecated, use is_direct",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             is_direct = kwargs.pop("isDirect")
         if "commitId" in kwargs:
-            warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "commitId is deprecated, use commit_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             commit_id = kwargs.pop("commitId")
         if "publishApiEndpoint" in kwargs:
-            warnings.warn("publishApiEndpoint is deprecated, use publish_api_endpoint", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "publishApiEndpoint is deprecated, use publish_api_endpoint",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             publish_api_endpoint = kwargs.pop("publishApiEndpoint")
 
         run_response = self.runs_start(
@@ -274,17 +315,25 @@ def runs_start_blocking(
 
     def run_stop(self, run_id=None, save_changes=True, **kwargs):
         if "runId" in kwargs:
-            warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "runId is deprecated, use run_id", DeprecationWarning, stacklevel=2
+            )
             run_id = kwargs.pop("runId")
         if "saveChanges" in kwargs:
-            warnings.warn("saveChanges is deprecated, use save_changes", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "saveChanges is deprecated, use save_changes",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             save_changes = kwargs.pop("saveChanges")
         self.log.warning("Use job_stop method instead")
         return self.job_stop(job_id=run_id, commit_results=save_changes)
 
     def runs_status(self, run_id=None, **kwargs):
         if "runId" in kwargs:
-            warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "runId is deprecated, use run_id", DeprecationWarning, stacklevel=2
+            )
             run_id = kwargs.pop("runId")
         url = self._routes.runs_status(run_id)
         return self._get(url)
@@ -301,10 +350,16 @@ def get_run_log(self, run_id=None, include_setup_log=True, **kwargs):
                 whether or not to include the setup log in the output.
         """
         if "runId" in kwargs:
-            warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "runId is deprecated, use run_id", DeprecationWarning, stacklevel=2
+            )
             run_id = kwargs.pop("runId")
         if "includeSetupLog" in kwargs:
-            warnings.warn("includeSetupLog is deprecated, use include_setup_log", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "includeSetupLog is deprecated, use include_setup_log",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             include_setup_log = kwargs.pop("includeSetupLog")
 
         url = self._routes.runs_stdout(run_id)
@@ -333,7 +388,9 @@ def runs_stdout(self, run_id=None, **kwargs):
                 the id associated with the run.
         """
         if "runId" in kwargs:
-            warnings.warn("runId is deprecated, use run_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "runId is deprecated, use run_id", DeprecationWarning, stacklevel=2
+            )
             run_id = kwargs.pop("runId")
 
         html_start_tags = (
@@ -355,7 +412,7 @@ def runs_stdout(self, run_id=None, **kwargs):
 
         return stdout
 
-    def job_start(
+    def job_start(  # noqa: C901
         self,
         command: str,
         commit_id: Optional[str] = None,
@@ -558,9 +615,13 @@ def throw_if_information_invalid(key: str, info: dict) -> bool:
                 )
 
             if "masterHardwareTierId" in compute_cluster_properties:
-                self._validate_hardware_tier_id(compute_cluster_properties["masterHardwareTierId"])
+                self._validate_hardware_tier_id(
+                    compute_cluster_properties["masterHardwareTierId"]
+                )
 
-            self._validate_hardware_tier_id(compute_cluster_properties["workerHardwareTierId"])
+            self._validate_hardware_tier_id(
+                compute_cluster_properties["workerHardwareTierId"]
+            )
 
         def validate_is_external_volume_mounts_supported():
             if not helpers.is_external_volume_mounts_supported(self._version):
@@ -668,15 +729,17 @@ def job_stop(self, job_id: str, commit_results: bool = True):
         response = self.request_manager.post(url, json=request)
         return response
 
-    def jobs_list(self,
-                  project_id: str,
-                  order_by: str = "number",
-                  sort_by: str = "desc",
-                  page_size: Optional[int] = None,
-                  page_no: int = 1,
-                  show_archived: str = "false",
-                  status: str = "all",
-                  tag: Optional[str] = None):
+    def jobs_list(
+        self,
+        project_id: str,
+        order_by: str = "number",
+        sort_by: str = "desc",
+        page_size: Optional[int] = None,
+        page_no: int = 1,
+        show_archived: str = "false",
+        status: str = "all",
+        tag: Optional[str] = None,
+    ):
         """
         Lists job history for a given project_id
         :param project_id: The project to query
@@ -689,7 +752,16 @@ def jobs_list(self,
         :param tag: Optional tag filter
         :return: The details
         """
-        url = self._routes.jobs_list(project_id, order_by, sort_by, page_size, page_no, show_archived, status, tag)
+        url = self._routes.jobs_list(
+            project_id,
+            order_by,
+            sort_by,
+            page_size,
+            page_no,
+            show_archived,
+            status,
+            tag,
+        )
         return self._get(url)
 
     def job_status(self, job_id: str) -> dict:
@@ -700,11 +772,7 @@ def job_status(self, job_id: str) -> dict:
         """
         return self.request_manager.get(self._routes.job_status(job_id)).json()
 
-    def job_restart(
-                self,
-                job_id: str,
-                should_use_original_input_commit: bool = True
-            ):
+    def job_restart(self, job_id: str, should_use_original_input_commit: bool = True):
         """
         Restarts a previous job
         :param job_id:                              ID of the original job that should be restarted
@@ -713,7 +781,7 @@ def job_restart(
         url = self._routes.job_restart()
         request = {
             "jobId": job_id,
-            "shouldUseOriginalInputCommit": should_use_original_input_commit
+            "shouldUseOriginalInputCommit": should_use_original_input_commit,
         }
         response = self.request_manager.post(url, json=request)
         return response
@@ -772,7 +840,11 @@ def get_job_status(job_identifier):
 
     def files_list(self, commit_id=None, path="/", **kwargs):
         if "commitId" in kwargs:
-            warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "commitId is deprecated, use commit_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             commit_id = kwargs.pop("commitId")
         url = self._routes.files_list(commit_id, path)
         return self._get(url)
@@ -789,8 +861,10 @@ def blobs_get(self, key):
         :param key: blob key
         :return: blob content
         """
-        message = "blobs_get is deprecated and will soon be removed. Please migrate to blobs_get_v2 and adjust the " \
-                  "input parameters accordingly "
+        message = (
+            "blobs_get is deprecated and will soon be removed. Please migrate to blobs_get_v2 and adjust the "
+            "input parameters accordingly "
+        )
         warnings.warn(message, DeprecationWarning)
         self._validate_blob_key(key)
         url = self._routes.blobs_get(key)
@@ -818,7 +892,11 @@ def endpoint_unpublish(self):
 
     def endpoint_publish(self, file, function, commit_id=None, **kwargs):
         if "commitId" in kwargs:
-            warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "commitId is deprecated, use commit_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             commit_id = kwargs.pop("commitId")
         url = self._routes.endpoint_publish()
 
@@ -846,8 +924,13 @@ def project_create_v4(
         visibility: Optional[ProjectVisibility] = ProjectVisibility.PUBLIC,
     ):
         owner = (
-            owner_id if owner_id else self.get_user_id(owner_username) if owner_username
-            else self.get_user_id(self._owner_username)
+            owner_id
+            if owner_id
+            else (
+                self.get_user_id(owner_username)
+                if owner_username
+                else self.get_user_id(self._owner_username)
+            )
         )
         data = {
             "name": project_name,
@@ -863,7 +946,9 @@ def project_create_v4(
 
         url = self._routes.project_v4()
         payload = json.dumps(data)
-        response = self.request_manager.post(url, data=payload, headers={'Content-Type': 'application/json'})
+        response = self.request_manager.post(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        )
         return response.json()
 
     def project_create(self, project_name, owner_username=None):
@@ -988,28 +1073,62 @@ def collaborators_remove(self, username_or_email):
         return response
 
     # App functions
-    def app_publish(self, unpublish_running_apps=True, hardware_tier_id=None, environment_id=None, external_volume_mount_ids=None, commit_id=None, branch=None, app_id=None, **kwargs):
+    def app_publish(
+        self,
+        unpublish_running_apps=True,
+        hardware_tier_id=None,
+        environment_id=None,
+        external_volume_mount_ids=None,
+        commit_id=None,
+        branch=None,
+        app_id=None,
+        **kwargs,
+    ):
         if "unpublishRunningApps" in kwargs:
-            warnings.warn("unpublishRunningApps is deprecated, use unpublish_running_apps", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "unpublishRunningApps is deprecated, use unpublish_running_apps",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             unpublish_running_apps = kwargs.pop("unpublishRunningApps")
         if "hardwareTierId" in kwargs:
-            warnings.warn("hardwareTierId is deprecated, use hardware_tier_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "hardwareTierId is deprecated, use hardware_tier_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             hardware_tier_id = kwargs.pop("hardwareTierId")
         if "environmentId" in kwargs:
-            warnings.warn("environmentId is deprecated, use environment_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "environmentId is deprecated, use environment_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             environment_id = kwargs.pop("environmentId")
         if "externalVolumeMountIds" in kwargs:
-            warnings.warn("externalVolumeMountIds is deprecated, use external_volume_mount_ids", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "externalVolumeMountIds is deprecated, use external_volume_mount_ids",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             external_volume_mount_ids = kwargs.pop("externalVolumeMountIds")
         if "commitId" in kwargs:
-            warnings.warn("commitId is deprecated, use commit_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "commitId is deprecated, use commit_id",
+                DeprecationWarning,
+                stacklevel=2,
+            )
             commit_id = kwargs.pop("commitId")
         if "appId" in kwargs:
-            warnings.warn("appId is deprecated, use app_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "appId is deprecated, use app_id", DeprecationWarning, stacklevel=2
+            )
             app_id = kwargs.pop("appId")
 
         if commit_id and branch:
-            raise ValueError("Only one of commit_id or branch may be specified, not both.")
+            raise ValueError(
+                "Only one of commit_id or branch may be specified, not both."
+            )
         app_id = app_id or self._app_id
         if unpublish_running_apps:
             self.app_unpublish(app_id=app_id)
@@ -1035,7 +1154,9 @@ def app_publish(self, unpublish_running_apps=True, hardware_tier_id=None, enviro
 
     def app_unpublish(self, app_id=None, **kwargs):
         if "appId" in kwargs:
-            warnings.warn("appId is deprecated, use app_id", DeprecationWarning, stacklevel=2)
+            warnings.warn(
+                "appId is deprecated, use app_id", DeprecationWarning, stacklevel=2
+            )
             app_id = kwargs.pop("appId")
         app_id = app_id or self._app_id
         if app_id is None:
@@ -1114,26 +1235,26 @@ def archive_environment(self, environment_id: str) -> None:
         self.request_manager.delete(url)
 
     def create_environment(
-            self,
-            name: str,
-            visibility: str,
-            dockerfile_instructions: str = "",
-            environment_variables: Optional[List[Dict[str, Any]]] = None,
-            base_image: str = "",
-            post_run_script: str = "",
-            post_setup_script: str = "",
-            pre_run_script: str = "",
-            pre_setup_script: str = "",
-            skip_cache: bool = False,
-            summary: str = "",
-            supported_clusters: Optional[List[str]] = None,
-            tags: Optional[List[str]] = None,
-            use_vpn: bool = False,
-            workspace_tools: Optional[List[Dict[str, Any]]] = None,
-            add_base_dependencies: bool = True,
-            description: str = "",
-            is_restricted: bool = False,
-            organization_owner_id: Optional[str] = None,
+        self,
+        name: str,
+        visibility: str,
+        dockerfile_instructions: str = "",
+        environment_variables: Optional[List[Dict[str, Any]]] = None,
+        base_image: str = "",
+        post_run_script: str = "",
+        post_setup_script: str = "",
+        pre_run_script: str = "",
+        pre_setup_script: str = "",
+        skip_cache: bool = False,
+        summary: str = "",
+        supported_clusters: Optional[List[str]] = None,
+        tags: Optional[List[str]] = None,
+        use_vpn: bool = False,
+        workspace_tools: Optional[List[Dict[str, Any]]] = None,
+        add_base_dependencies: bool = True,
+        description: str = "",
+        is_restricted: bool = False,
+        organization_owner_id: Optional[str] = None,
     ) -> dict:
         """
         Create a new Domino compute environment.
@@ -1195,36 +1316,36 @@ def create_environment(
             "description": description,
             "isRestricted": is_restricted,
             "name": name,
-            "visibility": visibility
+            "visibility": visibility,
         }
 
         if organization_owner_id:
-            data.update({
-                "orgOwnerId": organization_owner_id
-            })
+            data.update({"orgOwnerId": organization_owner_id})
 
         url = self._routes.environment_create()
         payload = json.dumps(data)
-        response = self.request_manager.post(url, data=payload, headers={"Content-Type": "application/json"})
+        response = self.request_manager.post(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        )
         return response.json()
 
     def create_environment_revision(
-                self,
-                environment_id: str,
-                dockerfile_instructions: str = "",
-                environment_variables: Optional[List[Dict[str, Any]]] = None,
-                base_image: Optional[str] = None,
-                post_run_script: str = "",
-                post_setup_script: str = "",
-                pre_run_script: str = "",
-                pre_setup_script: str = "",
-                skip_cache: bool = False,
-                summary: str = "",
-                supported_clusters: Optional[List[str]] = None,
-                tags: Optional[List[str]] = None,
-                use_vpn: bool = False,
-                workspace_tools: Optional[List[Dict[str, Any]]] = None,
-            ) -> dict:
+        self,
+        environment_id: str,
+        dockerfile_instructions: str = "",
+        environment_variables: Optional[List[Dict[str, Any]]] = None,
+        base_image: Optional[str] = None,
+        post_run_script: str = "",
+        post_setup_script: str = "",
+        pre_run_script: str = "",
+        pre_setup_script: str = "",
+        skip_cache: bool = False,
+        summary: str = "",
+        supported_clusters: Optional[List[str]] = None,
+        tags: Optional[List[str]] = None,
+        use_vpn: bool = False,
+        workspace_tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> dict:
         """
         Create a new revision of an existing Domino environment.
 
@@ -1275,30 +1396,30 @@ def create_environment_revision(
             "supportedClusters": supported_clusters,
             "tags": tags,
             "useVpn": use_vpn,
-            "workspaceTools": workspace_tools
+            "workspaceTools": workspace_tools,
         }
 
         url = self._routes.revision_create(environment_id)
         payload = json.dumps(data)
-        response = self.request_manager.post(url, data=payload, headers={"Content-Type": "application/json"})
+        response = self.request_manager.post(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        )
         return response.json()
 
     def restrict_environment_revision(
-            self,
-            environment_id: str,
-            revision_id: str
-       ) -> None:
+        self, environment_id: str, revision_id: str
+    ) -> None:
         """
         Restrict an environment revision.
         """
 
-        data = {
-            "isRestricted": True
-        }
+        data = {"isRestricted": True}
 
         url = self._routes.revision_patch(environment_id, revision_id)
         payload = json.dumps(data)
-        self.request_manager.patch(url, data=payload, headers={"Content-Type": "application/json"})
+        self.request_manager.patch(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        )
 
     # Model Manager functions
 
@@ -1423,7 +1544,7 @@ def datasets_upload_files(
         file_upload_setting: str = None,
         max_workers: int = None,
         target_chunk_size: int = None,
-        target_relative_path: str = None
+        target_relative_path: str = None,
     ) -> str:
         """Upload file to dataset with multithreaded support.
 
@@ -1445,15 +1566,21 @@ def datasets_upload_files(
         if file_upload_setting is None or file_upload_setting == "Ignore":
             text = "Ignore setting selected - any file with naming conflict will not be uploaded."
         elif file_upload_setting == "Overwrite":
-            text = "Overwrite setting selected - note that any existing file with naming conflict " \
-                   "will be overridden."
+            text = (
+                "Overwrite setting selected - note that any existing file with naming conflict "
+                "will be overridden."
+            )
         elif file_upload_setting == "Rename":
-            text = "Rename setting selected - note that naming conflicts will be resolved by appending " \
-                   "an increasing integer at the end of the uploaded files. In case of a directory with " \
-                   "numerous conflicts, this will cause severe file proliferation."
+            text = (
+                "Rename setting selected - note that naming conflicts will be resolved by appending "
+                "an increasing integer at the end of the uploaded files. In case of a directory with "
+                "numerous conflicts, this will cause severe file proliferation."
+            )
         else:
-            raise ValueError(f"input file_upload_setting {file_upload_setting} not allowed. Please use "
-                             f"`Overwrite`, `Rename`, or `Ignore` only.")
+            raise ValueError(
+                f"input file_upload_setting {file_upload_setting} not allowed. Please use "
+                f"`Overwrite`, `Rename`, or `Ignore` only."
+            )
         self.log.warning(text)
 
         with datasets.Uploader(
@@ -1463,15 +1590,16 @@ def datasets_upload_files(
             log=self.log,
             request_manager=self.request_manager,
             routes=self._routes,
-
             file_upload_setting=file_upload_setting,
             max_workers=max_workers,
             target_chunk_size=target_chunk_size,
-            target_relative_path=target_relative_path
+            target_relative_path=target_relative_path,
         ) as uploader:
             path = uploader.upload()
-            self.log.info(f"Uploading chunks for file or directory `{path}` to dataset {dataset_id} completed. "
-                          f"Now attempting to end upload session.")
+            self.log.info(
+                f"Uploading chunks for file or directory `{path}` to dataset {dataset_id} completed. "
+                f"Now attempting to end upload session."
+            )
             return path
 
     def model_version_export(
@@ -1571,7 +1699,9 @@ def budget_defaults_list(self) -> list:
         url = self._routes.budgets_default()
         return self.request_manager.get(url).json()
 
-    def budget_defaults_update(self, budget_label: BudgetLabel, budget_limit: float) -> dict:
+    def budget_defaults_update(
+        self, budget_label: BudgetLabel, budget_limit: float
+    ) -> dict:
         """
         Update default budgets limits (or quota) by BudgetLabel
         Requires Admin permission
@@ -1582,10 +1712,16 @@ def budget_defaults_update(self, budget_label: BudgetLabel, budget_limit: float)
         :return: Returns the updated budget with the newly assigned limit.
         """
         url = self._routes.budgets_default(budget_label.value)
-        updated_budget = {"budgetLabel": budget_label.value, "budgetType": "Default", "limit": budget_limit,
-                          "window": "monthly"}
+        updated_budget = {
+            "budgetLabel": budget_label.value,
+            "budgetType": "Default",
+            "limit": budget_limit,
+            "window": "monthly",
+        }
         data = json.dumps(updated_budget)
-        return self.request_manager.put(url, data=data, headers={"Content-Type": "application/json"}).json()
+        return self.request_manager.put(
+            url, data=data, headers={"Content-Type": "application/json"}
+        ).json()
 
     def budget_overrides_list(self):
         """
@@ -1597,7 +1733,9 @@ def budget_overrides_list(self):
         url = self._routes.budget_overrides()
         return self.request_manager.get(url).json()
 
-    def budget_override_create(self, budget_label: BudgetLabel, budget_id: str, budget_limit: float) -> dict:
+    def budget_override_create(
+        self, budget_label: BudgetLabel, budget_id: str, budget_limit: float
+    ) -> dict:
         """
         Create Budget overrides based on BudgetLabels, ie BillingTags, Organization, or Projects
         the object id is used as budget ids
@@ -1612,9 +1750,13 @@ def budget_override_create(self, budget_label: BudgetLabel, budget_id: str, budg
         url = self._routes.budget_overrides()
         new_budget: dict = self._generate_budget(budget_label, budget_id, budget_limit)
         data = json.dumps(new_budget)
-        return self.request_manager.post(url, data=data, headers={"Content-Type": "application/json"}).json()
+        return self.request_manager.post(
+            url, data=data, headers={"Content-Type": "application/json"}
+        ).json()
 
-    def budget_override_update(self, budget_label: BudgetLabel, budget_id: str, budget_limit: float) -> dict:
+    def budget_override_update(
+        self, budget_label: BudgetLabel, budget_id: str, budget_limit: float
+    ) -> dict:
         """
         Update Budget overrides based on BudgetLabel and budget id
         Requires Admin roles
@@ -1628,7 +1770,9 @@ def budget_override_update(self, budget_label: BudgetLabel, budget_id: str, budg
         url = self._routes.budget_overrides(budget_id)
         new_budget: dict = self._generate_budget(budget_label, budget_id, budget_limit)
         data = json.dumps(new_budget)
-        return self.request_manager.put(url, data=data, headers={"Content-Type": "application/json"}).json()
+        return self.request_manager.put(
+            url, data=data, headers={"Content-Type": "application/json"}
+        ).json()
 
     def budget_override_delete(self, budget_id: str) -> list:
         """
@@ -1655,7 +1799,7 @@ def budget_alerts_settings(self) -> dict:
     def budget_alerts_settings_update(
         self,
         alerts_enabled: Optional[bool] = None,
-        notify_org_owner: Optional[bool] = None
+        notify_org_owner: Optional[bool] = None,
     ) -> dict:
         """
         Update the current budget alerts settings to enable/disable budget notifications
@@ -1671,7 +1815,7 @@ def budget_alerts_settings_update(
 
         optional_fields = {
             "alertsEnabled": alerts_enabled,
-            "notifyOrgOwner": notify_org_owner
+            "notifyOrgOwner": notify_org_owner,
         }
 
         updated_settings = self._update_if_set(current_settings, optional_fields)
@@ -1698,7 +1842,9 @@ def budget_alerts_targets_update(self, targets: dict[BudgetLabel, list]) -> dict
 
         for target in current_targets:
             if target["label"] in targets:
-                updated_targets.append({"label": target["label"], "emails": targets[target["label"]]})
+                updated_targets.append(
+                    {"label": target["label"], "emails": targets[target["label"]]}
+                )
             else:
                 updated_targets.append(target)
 
@@ -1731,7 +1877,9 @@ def billing_tags_create(self, tags_list: list) -> dict:
         self.requires_at_least("5.11.0")
         url = self._routes.billing_tags()
         payload = json.dumps({"billingTags": tags_list})
-        return self.request_manager.post(url, data=payload, headers={"Content-Type": "application/json"}).json()
+        return self.request_manager.post(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        ).json()
 
     def active_billing_tag_by_name(self, name: str) -> dict:
         """
@@ -1777,7 +1925,9 @@ def billing_tag_settings_mode(self) -> dict:
         url = self._routes.billing_tags_settings(mode_only=True)
         return self.request_manager.get(url).json()
 
-    def billing_tag_settings_mode_update(self, mode: BillingTagSettingMode) -> dict[str, BillingTagSettingMode]:
+    def billing_tag_settings_mode_update(
+        self, mode: BillingTagSettingMode
+    ) -> dict[str, BillingTagSettingMode]:
         """
         Update the current billing tag settings mode
         Requires Admin permission
@@ -1788,7 +1938,9 @@ def billing_tag_settings_mode_update(self, mode: BillingTagSettingMode) -> dict[
         """
         url = self._routes.billing_tags_settings(mode_only=True)
         payload = json.dumps({"mode": mode.value})
-        return self.request_manager.put(url, data=payload, headers={"Content-Type": "application/json"}).json()
+        return self.request_manager.put(
+            url, data=payload, headers={"Content-Type": "application/json"}
+        ).json()
 
     def project_billing_tag(self, project_id: Optional[str] = None) -> Optional[dict]:
         """
@@ -1799,10 +1951,14 @@ def project_billing_tag(self, project_id: Optional[str] = None) -> Optional[dict
 
         :return: Returns the billing tag if assigned or None
         """
-        url = self._routes.project_billing_tag(project_id if project_id else self.project_id)
+        url = self._routes.project_billing_tag(
+            project_id if project_id else self.project_id
+        )
         return self.request_manager.get(url).json()
 
-    def project_billing_tag_update(self, billing_tag: str, project_id: Optional[str] = None) -> dict:
+    def project_billing_tag_update(
+        self, billing_tag: str, project_id: Optional[str] = None
+    ) -> dict:
         """
         Update project's billing tag with new billing tag.
         Requires Admin permission
@@ -1812,10 +1968,10 @@ def project_billing_tag_update(self, billing_tag: str, project_id: Optional[str]
 
         :return: Returns the project details including the new billing tag
         """
-        url = self._routes.project_billing_tag(project_id if project_id else self.project_id)
-        data = {
-            "tag": billing_tag
-        }
+        url = self._routes.project_billing_tag(
+            project_id if project_id else self.project_id
+        )
+        data = {"tag": billing_tag}
         return self.request_manager.post(url, data=json.dumps(data)).json()
 
     def project_billing_tag_reset(self, project_id: Optional[str] = None) -> dict:
@@ -1827,7 +1983,9 @@ def project_billing_tag_reset(self, project_id: Optional[str] = None) -> dict:
 
         :return: Returns the project details
         """
-        url = self._routes.project_billing_tag(project_id if project_id else self.project_id)
+        url = self._routes.project_billing_tag(
+            project_id if project_id else self.project_id
+        )
         return self.request_manager.delete(url).json()
 
     def projects_by_billing_tag(
@@ -1860,7 +2018,7 @@ def projects_by_billing_tag(
         parameters = {
             "offset": offset,
             "pageSize": page_size,
-            "missingBillingTagOnly": str(missing_tag_only).lower()
+            "missingBillingTagOnly": str(missing_tag_only).lower(),
         }
 
         optional_params = {
@@ -1887,18 +2045,9 @@ def project_billing_tag_bulk_update(self, projects_tag: dict[str, str]) -> dict:
         """
         value_list = []
         for key, value in projects_tag.items():
-            value_list.append(
-                {
-                    "projectId": key,
-                    "billingTag": {
-                        "tag": value
-                    }
-                }
-            )
+            value_list.append({"projectId": key, "billingTag": {"tag": value}})
 
-        data = {
-            "projectsBillingTags": value_list
-        }
+        data = {"projectsBillingTags": value_list}
 
         url = self._routes.projects_billing_tags()
         return self.request_manager.post(url, data=json.dumps(data)).json()
@@ -1999,10 +2148,7 @@ def _validate_blob_path(path):
         normalized_path = os.path.normpath(path)
         if path != normalized_path:
             raise exceptions.MalformedInputException(
-                (
-                    "Path should be normalized and cannot contain "
-                    "'..' or '../'. "
-                )
+                ("Path should be normalized and cannot contain " "'..' or '../'. ")
             )
 
     @staticmethod
@@ -2021,13 +2167,15 @@ def _validate_information_data_type(info: dict):
             )
 
     @staticmethod
-    def _generate_budget(budget_label: BudgetLabel, budget_id: str, budget_limit: float) -> dict:
+    def _generate_budget(
+        budget_label: BudgetLabel, budget_id: str, budget_limit: float
+    ) -> dict:
         return {
             "limit": budget_limit,
             "labelId": budget_id,
             "window": "monthly",
             "budgetLabel": budget_label.value,
-            "budgetType": BudgetType.OVERRIDE.value
+            "budgetType": BudgetType.OVERRIDE.value,
         }
 
     @staticmethod
diff --git a/domino/http_request_manager.py b/domino/http_request_manager.py
index 96d31cd7..7172b474 100644
--- a/domino/http_request_manager.py
+++ b/domino/http_request_manager.py
@@ -10,13 +10,12 @@
 from .constants import DOMINO_VERIFY_CERTIFICATE
 from .exceptions import ReloginRequiredException
 
-
 R_SESSION_MAX_RETRIES = 4
 
 
 class _SessionInitializer:
     def __initialize__(self, session):
-        raise NotImplementedError('Session initializers must be callable.')
+        raise NotImplementedError("Session initializers must be callable.")
 
 
 class _HttpRequestManager:
diff --git a/domino/routes.py b/domino/routes.py
index ed20e1a2..9b5c3f7b 100644
--- a/domino/routes.py
+++ b/domino/routes.py
@@ -1,8 +1,6 @@
 import warnings
-
-from urllib.parse import quote
-
 from typing import Optional
+from urllib.parse import quote
 
 
 class _Routes:
@@ -59,14 +57,14 @@ def runs_list(self):
     def runs_start(self):
         return self._build_project_url() + "/runs"
 
-    def runs_status(self, runId):
-        return self._build_project_url() + "/runs/" + runId
+    def runs_status(self, run_id):
+        return self._build_project_url() + "/runs/" + run_id
 
-    def runs_stdout(self, runId):
-        return self._build_project_url() + "/run/" + runId + "/stdout"
+    def runs_stdout(self, run_id):
+        return self._build_project_url() + "/run/" + run_id + "/stdout"
 
-    def files_list(self, commitId, path):
-        return self._build_project_url() + "/files/" + commitId + "/" + path
+    def files_list(self, commit_id, path):
+        return self._build_project_url() + "/files/" + commit_id + "/" + path
 
     def files_upload(self, path):
         return self._build_project_url() + path
@@ -76,13 +74,18 @@ def commits_list(self):
 
     # Deprecated - use blobs_get_v2 instead
     def blobs_get(self, key):
-        message = "blobs_get is deprecated and will soon be removed. Please migrate to blobs_get_v2 and adjust the " \
-                  "input parameters accordingly "
+        message = (
+            "blobs_get is deprecated and will soon be removed. Please migrate to blobs_get_v2 and adjust the "
+            "input parameters accordingly "
+        )
         warnings.warn(message, DeprecationWarning)
         return self._build_project_url() + "/blobs/" + key
 
     def blobs_get_v2(self, path, commit_id, project_id):
-        return self.host + f"/api/projects/v1/projects/{project_id}/files/{commit_id}/{path}/content"
+        return (
+            self.host
+            + f"/api/projects/v1/projects/{project_id}/files/{commit_id}/{path}/content"
+        )
 
     def fork_project(self, project_id):
         return self.host + f"/v4/projects/{project_id}/fork"
@@ -194,7 +197,10 @@ def revision_create(self, environment_id):
         return self._build_beta_environments_url() + f"/{environment_id}/revisions"
 
     def revision_patch(self, environment_id, revision_id):
-        return self._build_beta_environments_url() + f"/{environment_id}/revisions/{revision_id}"
+        return (
+            self._build_beta_environments_url()
+            + f"/{environment_id}/revisions/{revision_id}"
+        )
 
     # Deployment URLs
 
@@ -208,15 +214,17 @@ def job_start(self):
     def job_stop(self):
         return f"{self.host}/v4/jobs/stop"
 
-    def jobs_list(self,
-                  project_id,
-                  order_by,
-                  sort_by,
-                  page_size,
-                  page_no,
-                  show_archived,
-                  status,
-                  tag):
+    def jobs_list(
+        self,
+        project_id,
+        order_by,
+        sort_by,
+        page_size,
+        page_no,
+        show_archived,
+        status,
+        tag,
+    ):
 
         order_by_query = f"&order_by={order_by}"
         sort_by_query = f"&sort_by={sort_by}"
@@ -260,20 +268,16 @@ def datasets_details(self, dataset_id):
         return self.host + "/dataset" + "/" + str(dataset_id)
 
     def datasets_start_upload(self, dataset_id):
-        return self.host + f"/v4/datasetrw/datasets/{str(dataset_id)}/snapshot/file/start"
+        return (
+            self.host + f"/v4/datasetrw/datasets/{str(dataset_id)}/snapshot/file/start"
+        )
 
     def datasets_test_chunk(
-        self,
-        dataset_id,
-        upload_key,
-        chunk_number,
-        total_chunks,
-        identifier,
-        checksum
+        self, dataset_id, upload_key, chunk_number, total_chunks, identifier, checksum
     ):
         return (
-            self.host +
-            f"/v4/datasetrw/datasets/{str(dataset_id)}/snapshot/file/test?key={upload_key}"
+            self.host
+            + f"/v4/datasetrw/datasets/{str(dataset_id)}/snapshot/file/test?key={upload_key}"
             f"&resumableChunkNumber={chunk_number}&resumableIdentifier={quote(identifier)}"
             f"&resumableTotalChunks={total_chunks}&checksum={quote(checksum)}"
         )
@@ -288,21 +292,27 @@ def datasets_upload_chunk(
         current_chunk_size,
         identifier,
         resumable_relative_path,
-        checksum
+        checksum,
     ):
         return (
-            self.host +
-            f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file?key={key}&resumableChunkNumber={chunk_number}" +
-            f"&resumableChunkSize={target_chunk_size}&resumableCurrentChunkSize={current_chunk_size}"
+            self.host
+            + f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file?key={key}&resumableChunkNumber={chunk_number}"
+            + f"&resumableChunkSize={target_chunk_size}&resumableCurrentChunkSize={current_chunk_size}"
             f"&resumableIdentifier={quote(identifier)}&resumableRelativePath={quote(resumable_relative_path)}"
             f"&resumableTotalChunks={total_chunks}&checksum={quote(checksum)}"
         )
 
     def datasets_cancel_upload(self, dataset_id, upload_key):
-        return self.host + f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file/cancel/{upload_key}"
+        return (
+            self.host
+            + f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file/cancel/{upload_key}"
+        )
 
     def datasets_end_upload(self, dataset_id, upload_key, target_relative_path=None):
-        url = self.host + f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file/end/{upload_key}"
+        url = (
+            self.host
+            + f"/v4/datasetrw/datasets/{dataset_id}/snapshot/file/end/{upload_key}"
+        )
         if target_relative_path:
             url += f"?targetRelativePath={target_relative_path}"
         return url
diff --git a/examples/example_budget_manager.py b/examples/example_budget_manager.py
index 878324c4..a453df52 100644
--- a/examples/example_budget_manager.py
+++ b/examples/example_budget_manager.py
@@ -1,9 +1,9 @@
 import os
-from pprint import pprint
 import uuid
+from pprint import pprint
 
 from domino import Domino
-from domino.domino_enums import BudgetLabel, BillingTagSettingMode
+from domino.domino_enums import BillingTagSettingMode, BudgetLabel
 
 
 def get_uuid() -> str:
@@ -82,7 +82,9 @@ def get_uuid() -> str:
 pprint(bt_setting)
 
 # update billingtag settings modes
-bt_settings_optional = domino.billing_tag_settings_mode_update(BillingTagSettingMode.OPTIONAL)
+bt_settings_optional = domino.billing_tag_settings_mode_update(
+    BillingTagSettingMode.OPTIONAL
+)
 bt_setting = domino.billing_tag_settings()
 pprint(bt_setting)
 
@@ -96,7 +98,9 @@ def get_uuid() -> str:
 pprint(active_billing_tags)
 
 # create new or unarchive existing billingtags
-new_billing_tags = domino.billing_tags_create(["BTExample003", "BTExample04", "BTExample06"])
+new_billing_tags = domino.billing_tags_create(
+    ["BTExample003", "BTExample04", "BTExample06"]
+)
 active_billing_tags = domino.billing_tags_list_active()
 pprint(active_billing_tags)
 
@@ -115,7 +119,9 @@ def get_uuid() -> str:
 
 # create projects with billing tags (billing tags settings mode must be Optional or Required)
 example_project_name = f"example-project-{get_uuid()}"
-bt_project = domino.project_create_v4(project_name=example_project_name, billing_tag="BTExample04")
+bt_project = domino.project_create_v4(
+    project_name=example_project_name, billing_tag="BTExample04"
+)
 pprint(bt_project)
 
 # create projects with billing tags (billing tags settings mode must be Optional or Disabled)
@@ -142,7 +148,11 @@ def get_uuid() -> str:
 pprint(projects_bt_04)
 
 # update projects' billing tags in bulk
-projects_tags = {bt_project["id"]: "BTExample06", project["id"]: "BTExample06", domino.project_id: "BTExample04"}
+projects_tags = {
+    bt_project["id"]: "BTExample06",
+    project["id"]: "BTExample06",
+    domino.project_id: "BTExample04",
+}
 domino.project_billing_tag_bulk_update(projects_tags)
 
 # query project by billing tag
diff --git a/examples/models_and_environments.py b/examples/models_and_environments.py
index 6592eb82..bae29576 100644
--- a/examples/models_and_environments.py
+++ b/examples/models_and_environments.py
@@ -3,7 +3,7 @@
 from domino import Domino
 
 domino = Domino(
-    os.environ['DOMINO_TEST_PROJECT'],
+    os.environ["DOMINO_TEST_PROJECT"],
     api_key=os.environ["DOMINO_USER_API_KEY"],
     host=os.environ["DOMINO_API_HOST"],
 )
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..817843f0
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,5 @@
+[tool.isort]
+profile = "black"
+
+[tool.black]
+target-version = ["py310"]
diff --git a/scripts/check_snake_case.py b/scripts/check_snake_case.py
index ec11ce22..2766a356 100644
--- a/scripts/check_snake_case.py
+++ b/scripts/check_snake_case.py
@@ -3,6 +3,7 @@
 Check that no camelCase parameter or variable names are introduced in domino/ source.
 Usage: python scripts/check_snake_case.py [file ...]
 """
+
 import ast
 import re
 import sys
diff --git a/setup.py b/setup.py
index 07db0ddc..e4aa346d 100644
--- a/setup.py
+++ b/setup.py
@@ -91,6 +91,6 @@ def get_version():
         "docs": [
             "sphinx>=7.4.0",
             "markupsafe==2.0.1",  # added for using Jinja2 with sphinx and python 3.10
-        ]
+        ],
     },
 )
diff --git a/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
index 3e8107c4..241a98f0 100644
--- a/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_failure_envelope_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestFailureEnvelopeV1(unittest.TestCase):
     """FailureEnvelopeV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
index a0dd2243..a925cf64 100644
--- a/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_invalid_body_envelope_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestInvalidBodyEnvelopeV1(unittest.TestCase):
     """InvalidBodyEnvelopeV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_metadata_v1.py b/tests/_impl/custommetrics/test_models/test_metadata_v1.py
index 17464f6e..fa7dd65e 100644
--- a/tests/_impl/custommetrics/test_models/test_metadata_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metadata_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestMetadataV1(unittest.TestCase):
     """MetadataV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py b/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
index 68229e7c..0cadce9c 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_alert_request_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestMetricAlertRequestV1(unittest.TestCase):
     """MetricAlertRequestV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py b/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
index 77d7f18a..6712115c 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_tag_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestMetricTagV1(unittest.TestCase):
     """MetricTagV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_metric_value_v1.py b/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
index f8290b83..997983db 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_value_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestMetricValueV1(unittest.TestCase):
     """MetricValueV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
index 8f6129d2..cfde4363 100644
--- a/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_metric_values_envelope_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestMetricValuesEnvelopeV1(unittest.TestCase):
     """MetricValuesEnvelopeV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py b/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
index db257d67..a099564d 100644
--- a/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_new_metric_value_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestNewMetricValueV1(unittest.TestCase):
     """NewMetricValueV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py b/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
index 283cd223..55195e0c 100644
--- a/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_new_metric_values_envelope_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestNewMetricValuesEnvelopeV1(unittest.TestCase):
     """NewMetricValuesEnvelopeV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_models/test_target_range_v1.py b/tests/_impl/custommetrics/test_models/test_target_range_v1.py
index 68f8cc9b..41fe32d3 100644
--- a/tests/_impl/custommetrics/test_models/test_target_range_v1.py
+++ b/tests/_impl/custommetrics/test_models/test_target_range_v1.py
@@ -1,12 +1,12 @@
 # coding: utf-8
 
 """
-    Domino Public API
+Domino Public API
 
-    Public API endpoints for Custom Metrics  # noqa: E501
+Public API endpoints for Custom Metrics  # noqa: E501
 
-    The version of the OpenAPI document: 5.3.0
-    Generated by: https://openapi-generator.tech
+The version of the OpenAPI document: 5.3.0
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
@@ -16,8 +16,9 @@
 
 class TestTargetRangeV1(unittest.TestCase):
     """TargetRangeV1 unit test stubs"""
+
     _configuration = configuration.Configuration()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_paths/__init__.py b/tests/_impl/custommetrics/test_paths/__init__.py
index 1309632d..4f9d44bd 100644
--- a/tests/_impl/custommetrics/test_paths/__init__.py
+++ b/tests/_impl/custommetrics/test_paths/__init__.py
@@ -6,43 +6,37 @@
 
 
 class ApiTestMixin:
-    json_content_type = 'application/json'
-    user_agent = 'OpenAPI-Generator/1.0.0/python'
+    json_content_type = "application/json"
+    user_agent = "OpenAPI-Generator/1.0.0/python"
 
     @classmethod
     def assert_pool_manager_request_called_with(
         cls,
         mock_request,
         url: str,
-        method: str = 'POST',
+        method: str = "POST",
         body: typing.Optional[bytes] = None,
         content_type: typing.Optional[str] = None,
         accept_content_type: typing.Optional[str] = None,
         stream: bool = False,
     ):
-        headers = {
-            'User-Agent': cls.user_agent
-        }
+        headers = {"User-Agent": cls.user_agent}
         if accept_content_type:
-            headers['Accept'] = accept_content_type
+            headers["Accept"] = accept_content_type
         if content_type:
-            headers['Content-Type'] = content_type
+            headers["Content-Type"] = content_type
         kwargs = dict(
             headers=HTTPHeaderDict(headers),
             preload_content=not stream,
             timeout=None,
         )
-        if content_type and method != 'GET':
-            kwargs['body'] = body
-        mock_request.assert_called_with(
-            method,
-            url,
-            **kwargs
-        )
+        if content_type and method != "GET":
+            kwargs["body"] = body
+        mock_request.assert_called_with(method, url, **kwargs)
 
     @staticmethod
     def headers_for_content_type(content_type: str) -> typing.Dict[str, str]:
-        return {'content-type': content_type}
+        return {"content-type": content_type}
 
     @classmethod
     def response(
@@ -51,18 +45,17 @@ def response(
         status: int = 200,
         content_type: str = json_content_type,
         headers: typing.Optional[typing.Dict[str, str]] = None,
-        preload_content: bool = True
+        preload_content: bool = True,
     ) -> urllib3.HTTPResponse:
         if headers is None:
             headers = {}
         headers.update(cls.headers_for_content_type(content_type))
         return urllib3.HTTPResponse(
-            body,
-            headers=headers,
-            status=status,
-            preload_content=preload_content
+            body, headers=headers, status=status, preload_content=preload_content
         )
 
     @staticmethod
     def json_bytes(in_data: typing.Any) -> bytes:
-        return json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode('utf-8')
+        return json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode(
+            "utf-8"
+        )
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py b/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
index 10c57bb0..6205c963 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_alerts_v1/test_post.py
@@ -3,14 +3,13 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
 
-
+from domino._impl.custommetrics import api_client, configuration
 from domino._impl.custommetrics.paths.api_metric_alerts_v1 import post  # noqa: E501
-from domino._impl.custommetrics import configuration, api_client
 
 from .. import ApiTestMixin
 
@@ -20,6 +19,7 @@ class TestApiMetricAlertsV1(ApiTestMixin, unittest.TestCase):
     ApiMetricAlertsV1 unit test stubs
         Send a metric alert  # noqa: E501
     """
+
     _configuration = configuration.Configuration()
 
     def setUp(self):
@@ -30,8 +30,8 @@ def tearDown(self):
         pass
 
     response_status = 200
-    response_body = ''
+    response_body = ""
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
index bb1871f0..a9087876 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1/test_post.py
@@ -3,14 +3,13 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
 
-
+from domino._impl.custommetrics import api_client, configuration
 from domino._impl.custommetrics.paths.api_metric_values_v1 import post  # noqa: E501
-from domino._impl.custommetrics import configuration, api_client
 
 from .. import ApiTestMixin
 
@@ -20,6 +19,7 @@ class TestApiMetricValuesV1(ApiTestMixin, unittest.TestCase):
     ApiMetricValuesV1 unit test stubs
         Log metric values  # noqa: E501
     """
+
     _configuration = configuration.Configuration()
 
     def setUp(self):
@@ -30,8 +30,8 @@ def tearDown(self):
         pass
 
     response_status = 201
-    response_body = ''
+    response_body = ""
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
index c6018acd..0118bd54 100644
--- a/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
+++ b/tests/_impl/custommetrics/test_paths/test_api_metric_values_v1_model_monitoring_id_metric/test_get.py
@@ -3,14 +3,15 @@
 """
 
 
-    Generated by: https://openapi-generator.tech
+Generated by: https://openapi-generator.tech
 """
 
 import unittest
 
-
-from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric import get  # noqa: E501
-from domino._impl.custommetrics import configuration, api_client
+from domino._impl.custommetrics import api_client, configuration
+from domino._impl.custommetrics.paths.api_metric_values_v1_model_monitoring_id_metric import (  # noqa: E501
+    get,
+)
 
 from .. import ApiTestMixin
 
@@ -20,6 +21,7 @@ class TestApiMetricValuesV1ModelMonitoringIdMetric(ApiTestMixin, unittest.TestCa
     ApiMetricValuesV1ModelMonitoringIdMetric unit test stubs
         Retrieve metric values  # noqa: E501
     """
+
     _configuration = configuration.Configuration()
 
     def setUp(self):
@@ -32,5 +34,5 @@ def tearDown(self):
     response_status = 200
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/agents/test_agents_eval_tags.py b/tests/agents/test_agents_eval_tags.py
index 744526a8..822cd4ab 100644
--- a/tests/agents/test_agents_eval_tags.py
+++ b/tests/agents/test_agents_eval_tags.py
@@ -2,5 +2,9 @@
 
 
 def test_build_eval_result_tags():
-    assert build_eval_result_tag('my_metric', '1') == 'domino.prog.metric.my_metric', 'numbers should be metrics'
-    assert build_eval_result_tag('my_label', 'cat') == 'domino.prog.label.my_label', 'strings should be labels'
+    assert (
+        build_eval_result_tag("my_metric", "1") == "domino.prog.metric.my_metric"
+    ), "numbers should be metrics"
+    assert (
+        build_eval_result_tag("my_label", "cat") == "domino.prog.label.my_label"
+    ), "strings should be labels"
diff --git a/tests/agents/test_read_agent_config.py b/tests/agents/test_read_agent_config.py
index 4bfb91f5..89026532 100644
--- a/tests/agents/test_read_agent_config.py
+++ b/tests/agents/test_read_agent_config.py
@@ -1,8 +1,9 @@
-from unittest.mock import patch
 import os
+from unittest.mock import patch
 
 from domino.agents import read_agent_config
 from domino.agents.read_agent_config import flatten_dict
+
 from ..conftest import TEST_AGENTS_ENV_VARS
 
 
@@ -10,36 +11,28 @@ def test_read_agent_config_path_from_env_var():
     with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
         config_values = read_agent_config()
 
-        assert config_values['version'] == 1.0
-        assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
-        assert config_values['chat_assistant']['temperature'] == 0.7
-        assert config_values['chat_assistant']['max_tokens'] == 1500
+        assert config_values["version"] == 1.0
+        assert config_values["chat_assistant"]["model"] == "gpt-3.5-turbo"
+        assert config_values["chat_assistant"]["temperature"] == 0.7
+        assert config_values["chat_assistant"]["max_tokens"] == 1500
 
 
 def test_read_agent_config_path_from_override_arg():
-    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_CONFIG_PATH": "broken_path"}, clear=True):
+    with patch.dict(
+        os.environ,
+        TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_CONFIG_PATH": "broken_path"},
+        clear=True,
+    ):
         config_values = read_agent_config("tests/assets/agent_config.yaml")
 
-        assert config_values['version'] == 1.0
-        assert config_values['chat_assistant']['model'] == 'gpt-3.5-turbo'
-        assert config_values['chat_assistant']['temperature'] == 0.7
-        assert config_values['chat_assistant']['max_tokens'] == 1500
+        assert config_values["version"] == 1.0
+        assert config_values["chat_assistant"]["model"] == "gpt-3.5-turbo"
+        assert config_values["chat_assistant"]["temperature"] == 0.7
+        assert config_values["chat_assistant"]["max_tokens"] == 1500
 
 
 def test_flatten_dict():
-    nested_dict = {
-            'a': 1,
-            'b': {
-                    'c': 2,
-                    'd': {'e': 3}
-            },
-            'f': 4
-    }
+    nested_dict = {"a": 1, "b": {"c": 2, "d": {"e": 3}}, "f": 4}
     flat_dict = flatten_dict(nested_dict)
-    expected_flat_dict = {
-            'a': 1,
-            'b.c': 2,
-            'b.d.e': 3,
-            'f': 4
-    }
+    expected_flat_dict = {"a": 1, "b.c": 2, "b.d.e": 3, "f": 4}
     assert flat_dict == expected_flat_dict
diff --git a/tests/agents/test_verify_domino_support.py b/tests/agents/test_verify_domino_support.py
index 720010b2..a1d08c37 100644
--- a/tests/agents/test_verify_domino_support.py
+++ b/tests/agents/test_verify_domino_support.py
@@ -1,16 +1,22 @@
 import logging
 import os
-import pytest
 from unittest.mock import patch
 
-from domino.agents._constants import MIN_MLFLOW_VERSION, MIN_DOMINO_VERSION
+import pytest
+
+from domino.agents._constants import MIN_DOMINO_VERSION, MIN_MLFLOW_VERSION
 from domino.agents._verify_domino_support import _get_version_endpoint
 from domino.exceptions import UnsupportedOperationException
+
 from ..conftest import TEST_AGENTS_ENV_VARS
 
 
 def test_get_version_endpoint():
-    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True):
+    with patch.dict(
+        os.environ,
+        TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"},
+        clear=True,
+    ):
         assert _get_version_endpoint() == "http://localhost:1111/version"
 
 
@@ -19,40 +25,74 @@ def test_verify_domino_support_when_get_domino_version_fails(caplog):
     If we fail to get the domino version, we shouldn't fail everything, since this may be due to network error
     and likely if they are on the wrong domino version, the mlflow-proxy won't support new code anyway.
     """
-    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
-            patch('domino.agents._verify_domino_support._get_domino_version', side_effect=RuntimeError("test_verify_domino_support_when_get_domino_version_fails")), \
-            patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version, \
-            caplog.at_level(logging.DEBUG):
+    with (
+        patch.dict(
+            os.environ,
+            TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"},
+            clear=True,
+        ),
+        patch(
+            "domino.agents._verify_domino_support._get_domino_version",
+            side_effect=RuntimeError(
+                "test_verify_domino_support_when_get_domino_version_fails"
+            ),
+        ),
+        patch(
+            "domino.agents._verify_domino_support._get_mlflow_version"
+        ) as mock_get_mlflow_version,
+        caplog.at_level(logging.DEBUG),
+    ):
 
         from domino.agents._verify_domino_support import _verify_domino_support_impl
+
         mock_get_mlflow_version.return_value = MIN_MLFLOW_VERSION
 
         # Should not raise and should pass
         _verify_domino_support_impl()
-        assert "Failed to get Domino version. Will continue without version info: test_verify_domino_support_when_get_domino_version_fails" in caplog.text
+        assert (
+            "Failed to get Domino version. Will continue without version info: test_verify_domino_support_when_get_domino_version_fails"
+            in caplog.text
+        )
 
 
-def test_verify_domino_support_domino_and_mlflow_correct_version(verify_domino_support_fixture):
+def test_verify_domino_support_domino_and_mlflow_correct_version(
+    verify_domino_support_fixture,
+):
     from domino.agents._verify_domino_support import _verify_domino_support_impl
-    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
+
+    verify_domino_support_fixture["mock_get_domino_version"].return_value = (
+        MIN_DOMINO_VERSION
+    )
+    verify_domino_support_fixture["mock_get_mlflow_version"].return_value = (
+        MIN_MLFLOW_VERSION
+    )
 
     # Should not raise
     _verify_domino_support_impl()
 
 
 @pytest.mark.order(1)
-def test_verify_domino_support_should_be_idempotent(verify_domino_support_fixture, mocker):
+def test_verify_domino_support_should_be_idempotent(
+    verify_domino_support_fixture, mocker
+):
     """
     This test must run first, because if verifies global functionality, which is incidentally exercised
     by other tests.
     """
     from domino.agents._verify_domino_support import verify_domino_support
-    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = MIN_MLFLOW_VERSION
+
+    verify_domino_support_fixture["mock_get_domino_version"].return_value = (
+        MIN_DOMINO_VERSION
+    )
+    verify_domino_support_fixture["mock_get_mlflow_version"].return_value = (
+        MIN_MLFLOW_VERSION
+    )
 
     import domino.agents._verify_domino_support
-    get_domino_version_spy = mocker.spy(domino.agents._verify_domino_support, "_get_domino_version")
+
+    get_domino_version_spy = mocker.spy(
+        domino.agents._verify_domino_support, "_get_domino_version"
+    )
 
     verify_domino_support()
     verify_domino_support()
@@ -62,28 +102,50 @@ def test_verify_domino_support_should_be_idempotent(verify_domino_support_fixtur
 
 def test_verify_domino_support_domino_wrong_version(verify_domino_support_fixture):
     from domino.agents._verify_domino_support import _verify_domino_support_impl
-    verify_domino_support_fixture['mock_get_domino_version'].return_value = "6.1.2"
+
+    verify_domino_support_fixture["mock_get_domino_version"].return_value = "6.1.2"
 
     with pytest.raises(UnsupportedOperationException) as exn:
         _verify_domino_support_impl()
 
-    assert str(exn.value) == "This version of Domino doesn’t support the agents package."
+    assert (
+        str(exn.value) == "This version of Domino doesn’t support the agents package."
+    )
 
 
 def test_verify_domino_support_mlflow_wrong_version(verify_domino_support_fixture):
     from domino.agents._verify_domino_support import _verify_domino_support_impl
-    verify_domino_support_fixture['mock_get_domino_version'].return_value = MIN_DOMINO_VERSION
-    verify_domino_support_fixture['mock_get_mlflow_version'].return_value = '3.1.0'
+
+    verify_domino_support_fixture["mock_get_domino_version"].return_value = (
+        MIN_DOMINO_VERSION
+    )
+    verify_domino_support_fixture["mock_get_mlflow_version"].return_value = "3.1.0"
 
     with pytest.raises(UnsupportedOperationException) as exn:
         _verify_domino_support_impl()
 
-    assert str(exn.value) == f"This code requires you to install mlflow>={MIN_MLFLOW_VERSION}"
+    assert (
+        str(exn.value)
+        == f"This code requires you to install mlflow>={MIN_MLFLOW_VERSION}"
+    )
 
 
 @pytest.fixture
 def verify_domino_support_fixture():
-    with patch.dict(os.environ, TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"}, clear=True), \
-            patch('domino.agents._verify_domino_support._get_domino_version') as mock_get_domino_version, \
-            patch('domino.agents._verify_domino_support._get_mlflow_version') as mock_get_mlflow_version:
-        yield {'mock_get_domino_version': mock_get_domino_version, 'mock_get_mlflow_version': mock_get_mlflow_version}
+    with (
+        patch.dict(
+            os.environ,
+            TEST_AGENTS_ENV_VARS | {"DOMINO_API_HOST": "http://localhost:1111/"},
+            clear=True,
+        ),
+        patch(
+            "domino.agents._verify_domino_support._get_domino_version"
+        ) as mock_get_domino_version,
+        patch(
+            "domino.agents._verify_domino_support._get_mlflow_version"
+        ) as mock_get_mlflow_version,
+    ):
+        yield {
+            "mock_get_domino_version": mock_get_domino_version,
+            "mock_get_mlflow_version": mock_get_mlflow_version,
+        }
diff --git a/tests/conftest.py b/tests/conftest.py
index b593fc64..a69a9459 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,7 +16,7 @@
 TEST_AGENTS_ENV_VARS = {
     "MLFLOW_TRACKING_URI": "http://localhost:5000",
     "DOMINO_AGENT_CONFIG_PATH": "tests/assets/agent_config.yaml",
-    "DOMINO_AGENT_IS_PROD": "false"
+    "DOMINO_AGENT_IS_PROD": "false",
 }
 
 version_info = {
@@ -195,6 +195,7 @@ class TestAuth(AuthBase):
         def __init__(self, *args, **kwargs):
             super(TestAuth, self).__init__(*args, **kwargs)
             self.header = None
+
     return TestAuth()
 
 
diff --git a/tests/integration/agents/conftest.py b/tests/integration/agents/conftest.py
index e38568d1..8e3e1dda 100644
--- a/tests/integration/agents/conftest.py
+++ b/tests/integration/agents/conftest.py
@@ -1,19 +1,22 @@
 import logging as logger
 import os
-import polling2
-import pytest
 import shutil
-from unittest.mock import patch
 import subprocess
+from unittest.mock import patch
+
+import polling2
+import pytest
 
-from ...conftest import TEST_AGENTS_ENV_VARS
 from domino.agents._constants import MIN_MLFLOW_VERSION
 
+from ...conftest import TEST_AGENTS_ENV_VARS
+
 
 @pytest.fixture
 def tracing():
     pytest.importorskip("mlflow")
     import domino.agents.tracing as tracing
+
     yield tracing
 
 
@@ -21,6 +24,7 @@ def tracing():
 def logging():
     pytest.importorskip("mlflow")
     import domino.agents.logging as logging
+
     yield logging
 
 
@@ -28,19 +32,20 @@ def logging():
 def mlflow():
     pytest.importorskip("mlflow")
     import mlflow
+
     yield mlflow
 
 
 def _remove_mlruns_data():
     try:
-        shutil.rmtree('./mlruns')
+        shutil.rmtree("./mlruns")
     except Exception as e:
         logger.warning(f"Failed to remove mlfruns directory during test cleanup: {e}")
 
 
 @pytest.fixture(scope="package")
 def setup_openai_mock_server():
-    server_command = ['pipenv', 'run', 'ai-mock', 'server']
+    server_command = ["pipenv", "run", "ai-mock", "server"]
     server_process = subprocess.Popen(server_command)
     yield
     server_process.kill()
@@ -51,34 +56,36 @@ def setup_mlflow_tracking_server_no_env_var_mock(docker_client):
     pytest.importorskip("mlflow")
     from mlflow import MlflowClient
 
-    with patch("domino.agents._verify_domino_support.verify_domino_support", clear=True) as mock_verify_domino_support:
+    with patch(
+        "domino.agents._verify_domino_support.verify_domino_support", clear=True
+    ) as mock_verify_domino_support:
         mock_verify_domino_support.return_value = None
         container_name = "test_mlflow_tracking_server"
         docker_client.containers.run(
-                f"ghcr.io/mlflow/mlflow:v{MIN_MLFLOW_VERSION}",
-                detach=True,
-                name=container_name,
-                ports={5000: 5000},
-                command="mlflow ui --host 0.0.0.0 --serve-artifacts",
+            f"ghcr.io/mlflow/mlflow:v{MIN_MLFLOW_VERSION}",
+            detach=True,
+            name=container_name,
+            ports={5000: 5000},
+            command="mlflow ui --host 0.0.0.0 --serve-artifacts",
         )
 
         try:
             live_container = polling2.poll(
-                    target=lambda: docker_client.containers.get(container_name),
-                    check_success=lambda container: container.status == 'running',
-                    timeout=10,
-                    step=2,
-                    ignore_exceptions=True,
+                target=lambda: docker_client.containers.get(container_name),
+                check_success=lambda container: container.status == "running",
+                timeout=10,
+                step=2,
+                ignore_exceptions=True,
             )
 
             # verify api is reachable
             client = MlflowClient()
-            experiments = polling2.poll(
-                    target=lambda: client.search_experiments(),
-                    check_success=lambda exp: True,
-                    timeout=10,
-                    step=2,
-                    ignore_exceptions=True,
+            experiments = polling2.poll(  # noqa: F841
+                target=lambda: client.search_experiments(),
+                check_success=lambda exp: True,
+                timeout=10,
+                step=2,
+                ignore_exceptions=True,
             )
 
             yield live_container
@@ -87,7 +94,9 @@ def setup_mlflow_tracking_server_no_env_var_mock(docker_client):
         except Exception as e:
             live_container = docker_client.containers.get(container_name)
             container_status = live_container.status
-            logger.error(f'Mlflow tracking server did not get to running state. status: {container_status}')
+            logger.error(
+                f"Mlflow tracking server did not get to running state. status: {container_status}"
+            )
             logger.info(live_container.logs())
             live_container.remove(force=True)
             _remove_mlruns_data()
@@ -95,6 +104,8 @@ def setup_mlflow_tracking_server_no_env_var_mock(docker_client):
 
 
 @pytest.fixture
-def setup_mlflow_tracking_server(setup_mlflow_tracking_server_no_env_var_mock, docker_client):
+def setup_mlflow_tracking_server(
+    setup_mlflow_tracking_server_no_env_var_mock, docker_client
+):
     with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
         yield setup_mlflow_tracking_server_no_env_var_mock
diff --git a/tests/integration/agents/mlflow_fixtures.py b/tests/integration/agents/mlflow_fixtures.py
index 1a423a6e..9185124b 100644
--- a/tests/integration/agents/mlflow_fixtures.py
+++ b/tests/integration/agents/mlflow_fixtures.py
@@ -1,11 +1,13 @@
-from datetime import datetime, timedelta
-import pytest
 import os
-from unittest.mock import patch
+from datetime import datetime, timedelta
 from typing import Optional
+from unittest.mock import patch
+
+import pytest
 
 from domino.agents._client import client
 from domino.agents.tracing._util import build_agent_experiment_name
+
 from .conftest import TEST_AGENTS_ENV_VARS
 from .test_util import reset_prod_tracing
 
@@ -28,15 +30,20 @@ def add_prod_tags(traces: Optional[list], agent_id: str, agent_version: str):
 
     pytest.importorskip("mlflow")
     import mlflow
+
     if not traces:
         exp_name = build_agent_experiment_name(agent_id)
         exp = mlflow.get_experiment_by_name(exp_name)
 
-        traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+        traces = mlflow.search_traces(
+            experiment_ids=[exp.experiment_id], return_type="list"
+        )
 
     for t in traces:
         client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_id", agent_id)
-        client.set_trace_tag(t.info.trace_id, "mlflow.domino.app_version_id", agent_version)
+        client.set_trace_tag(
+            t.info.trace_id, "mlflow.domino.app_version_id", agent_version
+        )
 
 
 def create_span_at_time(name: str, inputs: int, hours_ago: int, experiment_id: str):
@@ -45,16 +52,18 @@ def create_span_at_time(name: str, inputs: int, hours_ago: int, experiment_id: s
 
     dt = datetime.now() - timedelta(hours=hours_ago)
     ns = int(dt.timestamp() * 1e9)
-    span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=experiment_id, start_time_ns=ns)
+    span = mlflow.start_span_no_context(
+        name=name, inputs=inputs, experiment_id=experiment_id, start_time_ns=ns
+    )
     span.end()
 
 
 def fixture_create_prod_traces(
-        agent_id: str,
-        agent_version: str,
-        trace_name: str,
-        tracing,
-        hours_ago: Optional[int] = None,  # also used as input value for span
+    agent_id: str,
+    agent_version: str,
+    trace_name: str,
+    tracing,
+    hours_ago: Optional[int] = None,  # also used as input value for span
 ):
     """Creates prod agent traces with a specific trace name"""
     pytest.importorskip("mlflow")
@@ -66,19 +75,30 @@ def fixture_create_prod_traces(
     def one(x):
         return x
 
-    env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": agent_id}
+    env_vars = TEST_AGENTS_ENV_VARS | {
+        "DOMINO_AGENT_IS_PROD": "true",
+        "DOMINO_APP_ID": agent_id,
+    }
     with patch.dict(os.environ, env_vars, clear=True):
         tracing.init_tracing()
         if hours_ago is not None:
-            experiment = mlflow.get_experiment_by_name(build_agent_experiment_name(agent_id))
-            create_span_at_time(trace_name, hours_ago, hours_ago, experiment.experiment_id)
+            experiment = mlflow.get_experiment_by_name(
+                build_agent_experiment_name(agent_id)
+            )
+            create_span_at_time(
+                trace_name, hours_ago, hours_ago, experiment.experiment_id
+            )
         else:
             one(1)
 
         exp_name = build_agent_experiment_name(agent_id)
         exp = mlflow.get_experiment_by_name(exp_name)
 
-        ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string=f"trace.name = '{trace_name}'", return_type='list')
+        ts = mlflow.search_traces(
+            experiment_ids=[exp.experiment_id],
+            filter_string=f"trace.name = '{trace_name}'",
+            return_type="list",
+        )
 
         # add prod tags (would be done by Domino deployment)
         add_prod_tags(ts, agent_id, agent_version)
diff --git a/tests/integration/agents/test_domino_run.py b/tests/integration/agents/test_domino_run.py
index 40790097..6c3d30eb 100644
--- a/tests/integration/agents/test_domino_run.py
+++ b/tests/integration/agents/test_domino_run.py
@@ -1,6 +1,7 @@
-import pytest
 import threading
 
+import pytest
+
 from domino.agents._constants import AGENT_RUN_TAG
 
 
@@ -18,7 +19,11 @@ def test_domino_run_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, l
     create_external_model_spy = mocker.spy(mlflow, "create_external_model")
     exp = mlflow.set_experiment("test_domino_run")
 
-    @tracing.add_tracing(name="add_numbers", autolog_frameworks=['sklearn'], evaluator=lambda span: {'add_numbers': span.outputs})
+    @tracing.add_tracing(
+        name="add_numbers",
+        autolog_frameworks=["sklearn"],
+        evaluator=lambda span: {"add_numbers": span.outputs},
+    )
     def add_numbers(x, y):
         return x + y
 
@@ -29,70 +34,82 @@ def add_numbers(x, y):
         add_numbers(2, 2)
 
     # verify logged model created only once
-    assert create_external_model_spy.call_count == 1, "create external model should be called once"
+    assert (
+        create_external_model_spy.call_count == 1
+    ), "create external model should be called once"
 
-    models = mlflow.search_logged_models(experiment_ids=[exp.experiment_id], output_format='list')
+    models = mlflow.search_logged_models(
+        experiment_ids=[exp.experiment_id], output_format="list"
+    )
 
     assert len(models) == 1
 
     model = models[0]
     # verify agent config added as configuration
-    assert model.params['chat_assistant.max_tokens'] == '1500'
-    assert model.params['chat_assistant.model'] == 'gpt-3.5-turbo'
-    assert model.params['chat_assistant.temperature'] == '0.7'
-    assert model.params['version'] == '1.0'
+    assert model.params["chat_assistant.max_tokens"] == "1500"
+    assert model.params["chat_assistant.model"] == "gpt-3.5-turbo"
+    assert model.params["chat_assistant.temperature"] == "0.7"
+    assert model.params["version"] == "1.0"
 
     # verify evaluation traces not logged to model
-    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], model_id=model.model_id, return_type='list')
+    ts = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id], model_id=model.model_id, return_type="list"
+    )
     assert len(ts) == 0, "traces should not be logged to model"
 
     run = mlflow.get_run(run_id)
 
     # verify run has agent config logged to it as parameters
-    assert run.data.params['chat_assistant.max_tokens'] == '1500'
-    assert run.data.params['chat_assistant.model'] == 'gpt-3.5-turbo'
-    assert run.data.params['chat_assistant.temperature'] == '0.7'
-    assert run.data.params['version'] == '1.0'
+    assert run.data.params["chat_assistant.max_tokens"] == "1500"
+    assert run.data.params["chat_assistant.model"] == "gpt-3.5-turbo"
+    assert run.data.params["chat_assistant.temperature"] == "0.7"
+    assert run.data.params["version"] == "1.0"
 
-    assert run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
+    assert (
+        run.data.tags.get(AGENT_RUN_TAG) == "false"
+    ), "DominoRun should tag the run as not an agent run"
 
     # verify run has summary metrics logged to it
     # average of outputs is 2 + 4/2 = 3
-    assert run.data.metrics['mean_add_numbers'] == 3, "average of add_numbers should be 3"
+    assert (
+        run.data.metrics["mean_add_numbers"] == 3
+    ), "average of add_numbers should be 3"
 
 
-def test_domino_run_dev_custom_aggregator(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_domino_run_dev_custom_aggregator(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     DominoRun will contain custom summarizaiton metrics for eval traces
     """
-    exp = mlflow.set_experiment("test_domino_run_custom_aggregator")
+    mlflow.set_experiment("test_domino_run_custom_aggregator")
 
-    @tracing.add_tracing(name="median", evaluator=lambda span: {'median': span.outputs})
+    @tracing.add_tracing(name="median", evaluator=lambda span: {"median": span.outputs})
     def for_median(x):
         return x
 
-    @tracing.add_tracing(name="mean", evaluator=lambda span: {'mean': span.outputs})
+    @tracing.add_tracing(name="mean", evaluator=lambda span: {"mean": span.outputs})
     def for_mean(x):
         return x
 
-    @tracing.add_tracing(name="stdev", evaluator=lambda span: {'stdev': span.outputs})
+    @tracing.add_tracing(name="stdev", evaluator=lambda span: {"stdev": span.outputs})
     def for_stdev(x):
         return x
 
-    @tracing.add_tracing(name="min", evaluator=lambda span: {'min': span.outputs})
+    @tracing.add_tracing(name="min", evaluator=lambda span: {"min": span.outputs})
     def for_min(x):
         return x
 
-    @tracing.add_tracing(name="max", evaluator=lambda span: {'max': span.outputs})
+    @tracing.add_tracing(name="max", evaluator=lambda span: {"max": span.outputs})
     def for_max(x):
         return x
 
     summarization_metrics = [
-            ('median', 'median'),
-            ('mean', 'mean'),
-            ('stdev', 'stdev'),
-            ('min', 'min'),
-            ('max', 'max')
+        ("median", "median"),
+        ("mean", "mean"),
+        ("stdev", "stdev"),
+        ("min", "min"),
+        ("max", "max"),
     ]
     run_id = None
     with logging.DominoRun(custom_summary_metrics=summarization_metrics) as run:
@@ -109,26 +126,30 @@ def for_max(x):
     # verify run has summary metrics logged to it
     # mean of outputs is 2 + 4/2 = 3
     # median is 2, 2, 4 = 2
-    assert run.data.metrics['median_median'] == 3
-    assert run.data.metrics['mean_mean'] == 3
-    assert run.data.metrics['stdev_stdev'] == 1.581
-    assert run.data.metrics['min_min'] == 1
-    assert run.data.metrics['max_max'] == 5
+    assert run.data.metrics["median_median"] == 3
+    assert run.data.metrics["mean_mean"] == 3
+    assert run.data.metrics["stdev_stdev"] == 1.581
+    assert run.data.metrics["min_min"] == 1
+    assert run.data.metrics["max_max"] == 5
 
 
-def test_domino_run_dev_bad_custom_aggregator(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_domino_run_dev_bad_custom_aggregator(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     DominoRun will fail if one of the aggregators is invalid
     """
-    exp = mlflow.set_experiment("test_domino_run_dev_bad_custom_aggregator")
+    mlflow.set_experiment("test_domino_run_dev_bad_custom_aggregator")
 
-    summarization_metrics = [('max', 'sdf')]
+    summarization_metrics = [("max", "sdf")]
 
     with pytest.raises(ValueError):
         logging.DominoRun(custom_summary_metrics=summarization_metrics)
 
 
-def test_domino_run_configure_experiment_name(setup_mlflow_tracking_server, mlflow, logging, tracing):
+def test_domino_run_configure_experiment_name(
+    setup_mlflow_tracking_server, mlflow, logging, tracing
+):
     """
     if an experiment name is provided, the DominoRun will create a run in that experiment
     and log traces to it
@@ -147,19 +168,25 @@ def unit(x):
 
     run = mlflow.get_run(run_id)
 
-    traces = mlflow.search_traces(experiment_ids=[exp_id], filter_string="trace.name = 'unit'")
+    traces = mlflow.search_traces(
+        experiment_ids=[exp_id], filter_string="trace.name = 'unit'"
+    )
 
-    assert run.info.experiment_id == exp_id, "run should belong to test_domino_run_configure_experiment_name_other"
+    assert (
+        run.info.experiment_id == exp_id
+    ), "run should belong to test_domino_run_configure_experiment_name_other"
     assert len(traces) == 1
 
 
-def test_domino_run_extend_current_run(setup_mlflow_tracking_server, mlflow, logging, tracing):
+def test_domino_run_extend_current_run(
+    setup_mlflow_tracking_server, mlflow, logging, tracing
+):
     """
     if a run_id is provided, then the DominoRun with add traces to that run
     """
     mlflow.set_experiment("test_domino_run_extend_current_run")
 
-    @tracing.add_tracing(name="unit", evaluator=lambda span: {'unit': span.outputs})
+    @tracing.add_tracing(name="unit", evaluator=lambda span: {"unit": span.outputs})
     def unit(x):
         return x
 
@@ -174,31 +201,43 @@ def unit(x):
         second_run_id = run.info.run_id
         unit(2)
 
-    traces = mlflow.search_traces(experiment_ids=[run.info.experiment_id], filter_string=f"metadata.mlflow.sourceRun = '{first_run_id}'", return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[run.info.experiment_id],
+        filter_string=f"metadata.mlflow.sourceRun = '{first_run_id}'",
+        return_type="list",
+    )
 
     assert first_run_id == second_run_id, "Both runs should have the same run_id"
     assert len(traces) == 2, "There should be two traces for unit"
 
     resumed_run = mlflow.get_run(first_run_id)
-    assert resumed_run.data.tags.get(AGENT_RUN_TAG) == "false", "DominoRun should tag the run as not an agent run"
+    assert (
+        resumed_run.data.tags.get(AGENT_RUN_TAG) == "false"
+    ), "DominoRun should tag the run as not an agent run"
 
     # each domino run should have an external model linked to it
-    models = mlflow.search_logged_models(experiment_ids=[run.info.experiment_id], output_format='list')
+    models = mlflow.search_logged_models(
+        experiment_ids=[run.info.experiment_id], output_format="list"
+    )
     assert [m.source_run_id for m in models] == [first_run_id, first_run_id]
 
 
-def test_domino_run_should_not_swallow_exceptions(setup_mlflow_tracking_server, mlflow, logging):
+def test_domino_run_should_not_swallow_exceptions(
+    setup_mlflow_tracking_server, mlflow, logging
+):
     """
     If the user's code raises an exception, the DominoRun should allow user code to catch it
     """
     mlflow.set_experiment("test_domino_run_should_not_swallow_exceptions")
 
     with pytest.raises(ZeroDivisionError):
-        with logging.DominoRun() as run:
-            1/0
+        with logging.DominoRun() as _run:  # noqa: F841
+            1 / 0
 
 
-def test_domino_run_parallelized_logic(setup_mlflow_tracking_server, mlflow, logging, tracing):
+def test_domino_run_parallelized_logic(
+    setup_mlflow_tracking_server, mlflow, logging, tracing
+):
     """
     Logic run in threads should execute normally
     """
@@ -222,23 +261,33 @@ def b(num):
         t1.join()
         t2.join()
 
-    traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
-    traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
+    traces_a = mlflow.search_traces(
+        filter_string="trace.name = 'a'", return_type="list"
+    )
+    traces_b = mlflow.search_traces(
+        filter_string="trace.name = 'b'", return_type="list"
+    )
 
     def get_run_id(trace):
-        return trace.info.trace_metadata.get('mlflow.sourceRun')
+        return trace.info.trace_metadata.get("mlflow.sourceRun")
 
     assert len(traces_a) == 1, "There should be one trace for a"
     assert len(traces_b) == 1, "There should be one trace for b"
-    assert get_run_id(traces_a[0]) == get_run_id(traces_b[0]), "The a and b traces should belong to the same run"
+    assert get_run_id(traces_a[0]) == get_run_id(
+        traces_b[0]
+    ), "The a and b traces should belong to the same run"
 
 
-def test_domino_run_extend_concluded_run_manual_evals_mean_logged(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_domino_run_extend_concluded_run_manual_evals_mean_logged(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     When extending a concluded run, manual log_evaluation calls inside the DominoRun block
     are summarized and the average metric is logged to the run.
     """
-    mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_mean_logged")
+    mlflow.set_experiment(
+        "test_domino_run_extend_concluded_run_manual_evals_mean_logged"
+    )
 
     @tracing.add_tracing(name="add_numbers")
     def add_numbers(x, y):
@@ -252,27 +301,35 @@ def add_numbers(x, y):
 
     # Extend the concluded run and log manual evaluations; DominoRun should log mean summary (3)
     with logging.DominoRun(run_id=concluded_run_id):
-        traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
+        traces_resp = tracing.search_traces(
+            run_id=concluded_run_id, trace_name="add_numbers"
+        )
         for t in traces_resp.data:
             # use the function output as the evaluation value
             value = t.spans[0].outputs
             logging.log_evaluation(
-                    trace_id=t.id,
-                    name="helpfulness",
-                    value=value,
+                trace_id=t.id,
+                name="helpfulness",
+                value=value,
             )
 
     run = mlflow.get_run(concluded_run_id)
     # average of 2 + 4 = 3
-    assert run.data.metrics['mean_helpfulness'] == 3, "average of helpfulness should be 3"
+    assert (
+        run.data.metrics["mean_helpfulness"] == 3
+    ), "average of helpfulness should be 3"
 
 
-def test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     When extending a concluded run, with a custom aggregator, manual log_evaluation calls inside the
     DominoRun block are summarized using the custom aggregator and logged to the run.
     """
-    mlflow.set_experiment("test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged")
+    mlflow.set_experiment(
+        "test_domino_run_extend_concluded_run_manual_evals_custom_aggregator_logged"
+    )
 
     @tracing.add_tracing(name="add_numbers")
     def add_numbers(x, y):
@@ -286,50 +343,58 @@ def add_numbers(x, y):
 
     # Extend the concluded run and log manual evaluations; DominoRun should log custom summary (max -> 4)
     custom_summary_metrics = [("helpfulness", "max")]
-    with logging.DominoRun(run_id=concluded_run_id, custom_summary_metrics=custom_summary_metrics):
-        traces_resp = tracing.search_traces(run_id=concluded_run_id, trace_name="add_numbers")
+    with logging.DominoRun(
+        run_id=concluded_run_id, custom_summary_metrics=custom_summary_metrics
+    ):
+        traces_resp = tracing.search_traces(
+            run_id=concluded_run_id, trace_name="add_numbers"
+        )
         for t in traces_resp.data:
             value = t.spans[0].outputs
             logging.log_evaluation(
-                    trace_id=t.id,
-                    name="helpfulness",
-                    value=value,
+                trace_id=t.id,
+                name="helpfulness",
+                value=value,
             )
 
     run = mlflow.get_run(concluded_run_id)
     # max of 2 and 4 is 4
-    assert run.data.metrics['max_helpfulness'] == 4, "max of helpfulness should be 4"
+    assert run.data.metrics["max_helpfulness"] == 4, "max of helpfulness should be 4"
 
 
-def test_domino_run_recomputes_existing_aggregations(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_domino_run_recomputes_existing_aggregations(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     When a run already has aggregated metrics (e.g., max_), a subsequent DominoRun
     on the same run_id recomputes those aggregations in addition to defaults.
     """
-    exp = mlflow.set_experiment("test_domino_run_recomputes_existing_aggregations")
+    mlflow.set_experiment("test_domino_run_recomputes_existing_aggregations")
 
-    @tracing.add_tracing(name="agg", evaluator=lambda span: {'agg': span.outputs})
+    @tracing.add_tracing(name="agg", evaluator=lambda span: {"agg": span.outputs})
     def agg_fn(x):
         return x
 
     run_id = None
     # First run computes both default mean and custom max aggregations
-    with logging.DominoRun(custom_summary_metrics=[('agg', 'mean'), ('agg', 'max')]) as run:
+    with logging.DominoRun(
+        custom_summary_metrics=[("agg", "mean"), ("agg", "max")]
+    ) as run:
         run_id = run.info.run_id
         agg_fn(1)
         agg_fn(3)
 
     run = mlflow.get_run(run_id)
-    assert run.data.metrics['mean_agg'] == 2, 'mean should be 2'
-    assert run.data.metrics['max_agg'] == 3, 'max should be 3'
+    assert run.data.metrics["mean_agg"] == 2, "mean should be 2"
+    assert run.data.metrics["max_agg"] == 3, "max should be 3"
 
     # Second run continues the same run and adds a new value; expects recomputed max (and mean)
-    with logging.DominoRun(run_id=run_id) as run2:
+    with logging.DominoRun(run_id=run_id) as _run2:  # noqa: F841
         agg_fn(5)
 
     run = mlflow.get_run(run_id)
-    assert run.data.metrics['max_agg'] == 5, 'max should be 5'
-    assert run.data.metrics['mean_agg'] == 3, 'mean should be 3'
+    assert run.data.metrics["max_agg"] == 5, "max should be 5"
+    assert run.data.metrics["mean_agg"] == 3, "mean should be 3"
 
 
 def test_domino_agent_context_tags_run(setup_mlflow_tracking_server, mlflow, logging):
@@ -339,10 +404,14 @@ def test_domino_agent_context_tags_run(setup_mlflow_tracking_server, mlflow, log
         run_id = run.info.run_id
 
     run = mlflow.get_run(run_id)
-    assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the run"
+    assert (
+        run.data.tags.get(AGENT_RUN_TAG) == "true"
+    ), "DominoAgentContext should tag the run"
 
 
-def test_domino_agent_context_tags_resumed_run(setup_mlflow_tracking_server, mlflow, logging):
+def test_domino_agent_context_tags_resumed_run(
+    setup_mlflow_tracking_server, mlflow, logging
+):
     mlflow.set_experiment("test_domino_agent_context_tags_resumed_run")
 
     with logging.DominoAgentContext() as run:
@@ -352,4 +421,6 @@ def test_domino_agent_context_tags_resumed_run(setup_mlflow_tracking_server, mlf
         pass
 
     run = mlflow.get_run(first_run_id)
-    assert run.data.tags.get(AGENT_RUN_TAG) == "true", "DominoAgentContext should tag the resumed run"
+    assert (
+        run.data.tags.get(AGENT_RUN_TAG) == "true"
+    ), "DominoAgentContext should tag the resumed run"
diff --git a/tests/integration/agents/test_logging.py b/tests/integration/agents/test_logging.py
index 53b23556..e337c4ee 100644
--- a/tests/integration/agents/test_logging.py
+++ b/tests/integration/agents/test_logging.py
@@ -1,6 +1,7 @@
 import pytest
 
 from domino.agents._eval_tags import InvalidEvaluationLabelException
+
 from .mlflow_fixtures import fixture_create_traces
 
 
@@ -11,26 +12,34 @@ def test_log_evaluation_dev(setup_mlflow_tracking_server, mlflow, logging):
     fixture_create_traces()
 
     # log evaluations to traces
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        filter_string="trace.name = 'test_add'",
+        return_type="list",
+    )
     for trace in traces:
         logging.log_evaluation(
-                trace.info.trace_id,
-                value=1,
-                name="helpfulness",
+            trace.info.trace_id,
+            value=1,
+            name="helpfulness",
         )
         logging.log_evaluation(
-                trace.info.trace_id,
-                value="dogs",
-                name="category",
+            trace.info.trace_id,
+            value="dogs",
+            name="category",
         )
 
     # verify tags on traces
-    tagged_traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type="list")
+    tagged_traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        filter_string="trace.name = 'test_add'",
+        return_type="list",
+    )
     tags = tagged_traces[0].info.tags
 
-    assert tags['domino.prog.label.category'] == 'dogs'
-    assert tags['domino.prog.metric.helpfulness'] == '1'
-    assert tags['domino.internal.is_eval'] == 'true'
+    assert tags["domino.prog.label.category"] == "dogs"
+    assert tags["domino.prog.metric.helpfulness"] == "1"
+    assert tags["domino.internal.is_eval"] == "true"
 
 
 def test_log_evaluation_invalid_name(setup_mlflow_tracking_server, mlflow, logging):
@@ -40,14 +49,18 @@ def test_log_evaluation_invalid_name(setup_mlflow_tracking_server, mlflow, loggi
     fixture_create_traces()
 
     # log evaluations to traces
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        filter_string="trace.name = 'test_add'",
+        return_type="list",
+    )
     trace = traces[0]
 
     with pytest.raises(InvalidEvaluationLabelException):
         logging.log_evaluation(
-                trace.info.trace_id,
-                value=1,
-                name="*",
+            trace.info.trace_id,
+            value=1,
+            name="*",
         )
 
 
@@ -61,12 +74,16 @@ def test_log_evaluation_non_string_float(setup_mlflow_tracking_server, mlflow, l
     fixture_create_traces()
 
     # log evaluations to traces
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], filter_string="trace.name = 'test_add'", return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        filter_string="trace.name = 'test_add'",
+        return_type="list",
+    )
     trace = traces[0]
 
     with pytest.raises(TypeError):
         logging.log_evaluation(
-                trace.info.trace_id,
-                value={},
-                name="myobject",
+            trace.info.trace_id,
+            value={},
+            name="myobject",
         )
diff --git a/tests/integration/agents/test_tracing.py b/tests/integration/agents/test_tracing.py
index 20fb80d4..adb5467b 100644
--- a/tests/integration/agents/test_tracing.py
+++ b/tests/integration/agents/test_tracing.py
@@ -1,21 +1,24 @@
 import asyncio
-from datetime import datetime, timedelta
 import inspect
 import logging as logger
 import os
-import pytest
 import threading
 import time
+from datetime import datetime, timedelta
 from unittest.mock import call, patch
 
-from ...conftest import TEST_AGENTS_ENV_VARS
+import pytest
+
 from domino.agents._constants import EXPERIMENT_AGENT_TAG
-from .mlflow_fixtures import fixture_create_prod_traces, add_prod_tags
-from .test_util import reset_prod_tracing
+from domino.agents._eval_tags import InvalidEvaluationLabelException
 from domino.agents.tracing._util import build_agent_experiment_name
+
 # NOTE: don't use this import to test public functions, use the tracing pytest fixture instead
 from domino.agents.tracing.tracing import _search_traces
-from domino.agents._eval_tags import InvalidEvaluationLabelException
+
+from ...conftest import TEST_AGENTS_ENV_VARS
+from .mlflow_fixtures import add_prod_tags, fixture_create_prod_traces
+from .test_util import reset_prod_tracing
 
 
 def test_init_tracing_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing):
@@ -28,11 +31,15 @@ def test_init_tracing_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing
     expected_experiment_name = build_agent_experiment_name(app_id)
     env_vars = TEST_AGENTS_ENV_VARS | test_case_vars
 
-    import domino.agents.tracing.tracing
-    import domino.agents._client
     import mlflow
+
+    import domino.agents._client
+    import domino.agents.tracing.tracing
+
     autolog_spy = mocker.spy(domino.agents.tracing.inittracing, "call_autolog")
-    set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
+    set_experiment_tag_spy = mocker.spy(
+        domino.agents._client.client, "set_experiment_tag"
+    )
     set_experiment_spy = mocker.spy(mlflow, "set_experiment")
 
     reset_prod_tracing()
@@ -42,14 +49,20 @@ def test_init_tracing_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing
         tracing.init_tracing(["sklearn"])
         found_exp = mlflow.get_experiment_by_name(expected_experiment_name)
 
-        assert autolog_spy.call_args_list == [call('sklearn')]
-        assert set_experiment_tag_spy.call_count == 1, "should only save tag on experiment once"
+        assert autolog_spy.call_args_list == [call("sklearn")]
+        assert (
+            set_experiment_tag_spy.call_count == 1
+        ), "should only save tag on experiment once"
         assert set_experiment_spy.call_count != 0, "should set an active experiment"
         assert found_exp is not None, "agent experiment should exist"
-        assert found_exp.tags.get(EXPERIMENT_AGENT_TAG) == "true", "agent experiment should be tagged"
+        assert (
+            found_exp.tags.get(EXPERIMENT_AGENT_TAG) == "true"
+        ), "agent experiment should be tagged"
 
 
-def test_init_tracing_logs_experiment_creation_debug(setup_mlflow_tracking_server, mlflow, tracing, caplog):
+def test_init_tracing_logs_experiment_creation_debug(
+    setup_mlflow_tracking_server, mlflow, tracing, caplog
+):
     """
     when log level is debug, verify the experiment creation log includes the experiment ID
     """
@@ -64,7 +77,9 @@ def test_init_tracing_logs_experiment_creation_debug(setup_mlflow_tracking_serve
         expected_experiment_name = build_agent_experiment_name(app_id)
         exp = mlflow.get_experiment_by_name(expected_experiment_name)
         assert exp is not None, "experiment should be created in prod mode"
-        assert f"Created experiment for Agent with ID {exp.experiment_id}" in caplog.text
+        assert (
+            f"Created experiment for Agent with ID {exp.experiment_id}" in caplog.text
+        )
 
 
 def test_logging_traces_prod(setup_mlflow_tracking_server, mocker, mlflow, tracing):
@@ -100,15 +115,23 @@ def b(num):
         t2.join()
 
     # a and b traces should all be in the agent experiment
-    traces_a = mlflow.search_traces(filter_string="trace.name = 'a'", return_type='list')
-    traces_b = mlflow.search_traces(filter_string="trace.name = 'b'", return_type='list')
+    traces_a = mlflow.search_traces(
+        filter_string="trace.name = 'a'", return_type="list"
+    )
+    traces_b = mlflow.search_traces(
+        filter_string="trace.name = 'b'", return_type="list"
+    )
 
     def get_experiment_id(trace):
         return trace.info.trace_location.mlflow_experiment.experiment_id
 
     found_exp_ids = set([get_experiment_id(t) for t in traces_a + traces_b])
-    actual_exp_id = set([mlflow.get_experiment_by_name(expected_experiment_name).experiment_id])
-    assert found_exp_ids == actual_exp_id, "traces should be linked to the agent experiment"
+    actual_exp_id = set(
+        [mlflow.get_experiment_by_name(expected_experiment_name).experiment_id]
+    )
+    assert (
+        found_exp_ids == actual_exp_id
+    ), "traces should be linked to the agent experiment"
 
 
 def test_inline_evaluators_should_not_run_prod(setup_mlflow_tracking_server, tracing):
@@ -121,15 +144,21 @@ def test_inline_evaluators_should_not_run_prod(setup_mlflow_tracking_server, tra
 
     reset_prod_tracing()
 
-    @tracing.add_tracing(name="span_unit", evaluator=lambda span: {'span_result': 1})
+    @tracing.add_tracing(name="span_unit", evaluator=lambda span: {"span_result": 1})
     def span_unit(x):
         return x
 
-    @tracing.add_tracing(name="trace_unit", trace_evaluator=lambda trace: {'trace_result': 1})
+    @tracing.add_tracing(
+        name="trace_unit", trace_evaluator=lambda trace: {"trace_result": 1}
+    )
     def trace_unit(x):
         return x
 
-    @tracing.add_tracing(name="trace_and_unit", evaluator=lambda span: {'both_span_result': 1}, trace_evaluator=lambda trace: {'both_trace_result': 1})
+    @tracing.add_tracing(
+        name="trace_and_unit",
+        evaluator=lambda span: {"both_span_result": 1},
+        trace_evaluator=lambda trace: {"both_trace_result": 1},
+    )
     def trace_and_unit(x):
         return x
 
@@ -153,9 +182,13 @@ def test_init_tracing_dev_mode(setup_mlflow_tracking_server, mocker, mlflow, tra
     """
     should not create an experiment or set tags
     """
-    import domino.agents._client
     import mlflow
-    set_experiment_tag_spy = mocker.spy(domino.agents._client.client, "set_experiment_tag")
+
+    import domino.agents._client
+
+    set_experiment_tag_spy = mocker.spy(
+        domino.agents._client.client, "set_experiment_tag"
+    )
     set_experiment_spy = mocker.spy(mlflow, "set_experiment")
 
     with patch.dict(os.environ, TEST_AGENTS_ENV_VARS, clear=True):
@@ -165,7 +198,9 @@ def test_init_tracing_dev_mode(setup_mlflow_tracking_server, mocker, mlflow, tra
         assert set_experiment_spy.call_count == 0, "should not set an active experiment"
 
 
-def test_add_tracing_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_add_tracing_dev(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     """
     add_tracing will create a new trace with a given name
     and attach evaluation tags to the trace
@@ -174,60 +209,82 @@ def test_add_tracing_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing,
     # so that mocker works
     exp = mlflow.set_experiment("test_add_tracing_dev")
 
-    @tracing.add_tracing(name="add_numbers", autolog_frameworks=["sklearn"], evaluator=lambda span: {'result': span.outputs})
+    @tracing.add_tracing(
+        name="add_numbers",
+        autolog_frameworks=["sklearn"],
+        evaluator=lambda span: {"result": span.outputs},
+    )
     def add_numbers(x, y):
         return x + y
 
     with logging.DominoRun("test_add_tracing_dev"):
         add_numbers(1, 1)
 
-    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type="list")
     assert len(ts) == 1, "only one trace should be created"
 
     # assert tags
     tags = ts[0].info.tags
-    assert tags['domino.prog.metric.result'] == '2'
-    assert tags['domino.internal.is_eval'] == 'true'
+    assert tags["domino.prog.metric.result"] == "2"
+    assert tags["domino.internal.is_eval"] == "true"
 
 
-def test_add_tracing_dev_use_trace_in_evaluator(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging, caplog):
+def test_add_tracing_dev_use_trace_in_evaluator(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging, caplog
+):
     """
     User can access a trace in an inline evaluator on the trace parent, and not the child
     """
-    exp = mlflow.set_experiment("test_add_tracing_dev_use_trace_in_evaluator")
+    mlflow.set_experiment("test_add_tracing_dev_use_trace_in_evaluator")
 
-    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    @tracing.add_tracing(
+        name="parent",
+        evaluator=lambda span: {"span_exists_parent": "True"},
+        trace_evaluator=lambda trace: {"trace_exists_parent": "True"},
+    )
     def parent(x):
         return unit(x)
 
     def child_trace_evaluator(trace):
-        return {'trace_exists_child': 'True'}
+        return {"trace_exists_child": "True"}
 
-    @tracing.add_tracing(name="unit", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=child_trace_evaluator)
+    @tracing.add_tracing(
+        name="unit",
+        evaluator=lambda span: {"span_exists_child": "True"},
+        trace_evaluator=child_trace_evaluator,
+    )
     def unit(x):
         return x
 
     with logging.DominoRun() as run, caplog.at_level(logger.WARNING):
         parent(1)
 
-    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[
+        0
+    ]
 
     evals = {r.name: r.value for r in parent_t.evaluation_results}
-    assert evals.get('trace_exists_parent') == 'True'
-    assert 'trace_exists_child' not in evals
-    assert evals.get('span_exists_parent') == 'True'
-    assert evals.get('span_exists_child') == 'True'
-    assert "A trace_evaluator child_trace_evaluator was provided, but the trace could not be found" in caplog.text
+    assert evals.get("trace_exists_parent") == "True"
+    assert "trace_exists_child" not in evals
+    assert evals.get("span_exists_parent") == "True"
+    assert evals.get("span_exists_child") == "True"
+    assert (
+        "A trace_evaluator child_trace_evaluator was provided, but the trace could not be found"
+        in caplog.text
+    )
 
 
 def test_add_tracing_invalid_label(setup_mlflow_tracking_server, tracing):
     with pytest.raises(InvalidEvaluationLabelException):
+
         @tracing.add_tracing(name="*")
         def unit(x):
             return x
 
 
-def test_add_tracing_dev_no_evaluator(setup_mlflow_tracking_server, mlflow, tracing, logging):
+def test_add_tracing_dev_no_evaluator(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     add_tracing will create a new trace not add evaluations
     """
@@ -241,13 +298,15 @@ def add_numbers(x, y):
         add_numbers(1, 1)
 
     # assert tags
-    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    ts = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type="list")
     tags = ts[0].info.tags
 
-    assert 'domino.internal.is_eval' not in tags
+    assert "domino.internal.is_eval" not in tags
 
 
-def test_add_tracing_decorator_preserves_function_info(setup_mlflow_tracking_server, tracing):
+def test_add_tracing_decorator_preserves_function_info(
+    setup_mlflow_tracking_server, tracing
+):
     def func_with_args(a: int, b: int, c: int = 10, *args, **kwargs):
         """Function with various parameter types."""
         return a + b + c
@@ -260,19 +319,24 @@ def decorated_func(a: int, b: int, c: int = 10, *args, **kwargs):
     original_sig = inspect.signature(func_with_args)
     decorated_sig = inspect.signature(decorated_func)
 
-    assert decorated_func.__name__ == "decorated_func", "the function name should be preserved by the decorator"
-    assert decorated_func.__doc__ == "returns the input value", "the function docstring should be preserved by the decorator"
+    assert (
+        decorated_func.__name__ == "decorated_func"
+    ), "the function name should be preserved by the decorator"
+    assert (
+        decorated_func.__doc__ == "returns the input value"
+    ), "the function docstring should be preserved by the decorator"
     assert decorated_func.__module__ == "tests.integration.agents.test_tracing"
     assert decorated_sig == original_sig
-    assert list(decorated_sig.parameters.keys()) == ['a', 'b', 'c', 'args', 'kwargs']
-    assert decorated_sig.parameters['c'].default == 10
-    assert decorated_sig.parameters['a'].annotation == int
+    assert list(decorated_sig.parameters.keys()) == ["a", "b", "c", "args", "kwargs"]
+    assert decorated_sig.parameters["c"].default == 10
+    assert decorated_sig.parameters["a"].annotation == int
 
 
 def test_add_tracing_preseves_self_and_cls(setup_mlflow_tracking_server, tracing):
     """
     add_tracing should preserve self and cls for functionality of the decorated method
     """
+
     class MyClass:
         class_value = 2
 
@@ -294,7 +358,9 @@ def class_method(cls, x):
     assert MyClass.class_method(1) == 3
 
 
-def test_add_tracing_arguments_passed_to_span(setup_mlflow_tracking_server, tracing, mlflow):
+def test_add_tracing_arguments_passed_to_span(
+    setup_mlflow_tracking_server, tracing, mlflow
+):
     """
     add_tracing should preserve self and cls for functionality of the decorated method,
     but should not pass them as inputs to the trace.
@@ -329,51 +395,80 @@ def fun_with_defaults(x=10):
     args_kwargs(1, y=2)
     fun_with_defaults()
 
-    instance_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'instance_method'")[0]
-    class_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'class_method'")[0]
-    args_kwargs_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'args_kwargs'")[0]
-    fun_with_defaults_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'fun_with_defaults'")[0]
+    instance_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'instance_method'",
+    )[0]
+    class_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'class_method'",
+    )[0]
+    args_kwargs_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'args_kwargs'",
+    )[0]
+    fun_with_defaults_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'fun_with_defaults'",
+    )[0]
 
     def get_inputs(trace):
         return trace.data.spans[0].inputs
 
     it_inputs = get_inputs(instance_trace)
-    assert it_inputs == {'x': 1}
+    assert it_inputs == {"x": 1}
 
     ct_inputs = get_inputs(class_trace)
-    assert ct_inputs == {'x': 1}
+    assert ct_inputs == {"x": 1}
 
     ak_inputs = get_inputs(args_kwargs_trace)
-    assert ak_inputs == {'args': [1], 'kwargs': {'y': 2}}
+    assert ak_inputs == {"args": [1], "kwargs": {"y": 2}}
 
     d_inputs = get_inputs(fun_with_defaults_trace)
-    assert d_inputs == {'x': 10}
+    assert d_inputs == {"x": 10}
 
 
-def test_add_tracing_failed_inline_evaluator_logs_warning(setup_mlflow_tracking_server, tracing, mlflow, caplog):
+def test_add_tracing_failed_inline_evaluator_logs_warning(
+    setup_mlflow_tracking_server, tracing, mlflow, caplog
+):
     """
     if the inline evaluator fails, a warning is logged and the main code still executes
     """
     mlflow.set_experiment("test_add_tracing_failed_inline_evaluator_logs_warning")
 
     def failing_trace_evaluator(t):
-        return 1/0
+        return 1 / 0
 
     def failing_evaluator(span):
-        return 1/0
+        return 1 / 0
 
-    @tracing.add_tracing(name="unit", evaluator=failing_evaluator, trace_evaluator=failing_trace_evaluator)
+    @tracing.add_tracing(
+        name="unit",
+        evaluator=failing_evaluator,
+        trace_evaluator=failing_trace_evaluator,
+    )
     def unit(x):
         return x
 
     with mlflow.start_run(), caplog.at_level(logger.ERROR):
         assert unit(1) == 1
         print(caplog.text)
-        assert "Inline evaluation failed for evaluator, failing_evaluator" in caplog.text
-        assert "Inline evaluation failed for trace_evaluator, failing_trace_evaluator" in caplog.text
+        assert (
+            "Inline evaluation failed for evaluator, failing_evaluator" in caplog.text
+        )
+        assert (
+            "Inline evaluation failed for trace_evaluator, failing_trace_evaluator"
+            in caplog.text
+        )
 
 
-def test_add_tracing_works_with_generator(setup_mlflow_tracking_server, tracing, mlflow):
+def test_add_tracing_works_with_generator(
+    setup_mlflow_tracking_server, tracing, mlflow
+):
     """
     add_tracing should not record all result from a generator if not specified
     if we don't eagerly load the reults onto one trace, we save a span for each yield
@@ -381,7 +476,11 @@ def test_add_tracing_works_with_generator(setup_mlflow_tracking_server, tracing,
     exp = mlflow.set_experiment("test_add_tracing_works_with_generator")
     experiment_id = exp.experiment_id
 
-    @tracing.add_tracing(name="gen", evaluator=lambda span: {'result': 1}, eagerly_evaluate_streamed_results=False)
+    @tracing.add_tracing(
+        name="gen",
+        evaluator=lambda span: {"result": 1},
+        eagerly_evaluate_streamed_results=False,
+    )
     def gen():
         for i in range(3):
             yield i
@@ -389,54 +488,84 @@ def gen():
     xs = [x for x in gen()]
     assert xs == [0, 1, 2], "Results should be unaffected by tracing"
 
-    gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen'")[0]
-    assert len(gen_trace.data.spans) == 4, "should have 4 spans, one for function call, and one for each yield"
-    assert [s.outputs for s in gen_trace.data.spans[1:]] == [0, 1, 2], "yields spans should have correct outputs"
-    assert ["group_id" in s.attributes for s in gen_trace.data.spans[1:]] == [True, True, True], "yields spans should have a group_id attribute"
+    gen_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'gen'",
+    )[0]
+    assert (
+        len(gen_trace.data.spans) == 4
+    ), "should have 4 spans, one for function call, and one for each yield"
+    assert [s.outputs for s in gen_trace.data.spans[1:]] == [
+        0,
+        1,
+        2,
+    ], "yields spans should have correct outputs"
+    assert ["group_id" in s.attributes for s in gen_trace.data.spans[1:]] == [
+        True,
+        True,
+        True,
+    ], "yields spans should have a group_id attribute"
     assert [s.attributes["index"] for s in gen_trace.data.spans[1:]] == [0, 1, 2]
-    assert len(set([s.attributes["group_id"] for s in gen_trace.data.spans[1:]])) == 1, "group_id should be the same for all yields"
+    assert (
+        len(set([s.attributes["group_id"] for s in gen_trace.data.spans[1:]])) == 1
+    ), "group_id should be the same for all yields"
 
     # assert evaluation didn't happen inline
     tags = gen_trace.info.tags
-    assert 'domino.prog.metric.result' not in tags
-    assert 'domino.internal.is_eval' not in tags
+    assert "domino.prog.metric.result" not in tags
+    assert "domino.internal.is_eval" not in tags
 
 
-def test_add_tracing_generator_trace_in_evaluator(setup_mlflow_tracking_server, tracing, mlflow, logging):
+def test_add_tracing_generator_trace_in_evaluator(
+    setup_mlflow_tracking_server, tracing, mlflow, logging
+):
     """
     When using a generator, the trace should be accessible in the parent generator function's evaluator,
     but not the child span's evaluator
     """
-    exp = mlflow.set_experiment("test_add_tracing_generator_trace_in_evaluator")
-    experiment_id = exp.experiment_id
 
-    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    @tracing.add_tracing(
+        name="parent",
+        evaluator=lambda span: {"span_exists_parent": "True"},
+        trace_evaluator=lambda trace: {"trace_exists_parent": "True"},
+    )
     def parent():
         yield from child(1)
 
-    @tracing.add_tracing(name="child", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=lambda trace: {'trace_exists_child': 'True'})
+    @tracing.add_tracing(
+        name="child",
+        evaluator=lambda span: {"span_exists_child": "True"},
+        trace_evaluator=lambda trace: {"trace_exists_child": "True"},
+    )
     def child(x):
         yield x
 
     with logging.DominoRun() as run:
         [_ for _ in parent()]
 
-    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[
+        0
+    ]
     evals = {r.name: r.value for r in parent_t.evaluation_results}
-    assert evals.get('trace_exists_parent') == 'True'
-    assert 'trace_exists_child' not in evals
-    assert evals.get('span_exists_parent') == 'True'
-    assert evals.get('span_exists_child') == 'True'
+    assert evals.get("trace_exists_parent") == "True"
+    assert "trace_exists_child" not in evals
+    assert evals.get("span_exists_parent") == "True"
+    assert evals.get("span_exists_child") == "True"
 
 
-def test_add_tracing_works_with_eagerly_evaluated_generator(setup_mlflow_tracking_server, tracing, mlflow):
+def test_add_tracing_works_with_eagerly_evaluated_generator(
+    setup_mlflow_tracking_server, tracing, mlflow
+):
     """
     add_tracing should record the result from a generator and evaluate it inline
     """
-    exp = mlflow.set_experiment("test_add_tracing_works_with_eagerly_evaluated_generator")
+    exp = mlflow.set_experiment(
+        "test_add_tracing_works_with_eagerly_evaluated_generator"
+    )
     experiment_id = exp.experiment_id
 
-    @tracing.add_tracing(name="gen_record_all", evaluator=lambda span: {'result': 1})
+    @tracing.add_tracing(name="gen_record_all", evaluator=lambda span: {"result": 1})
     def gen_record_all():
         for i in range(3):
             yield i
@@ -444,59 +573,85 @@ def gen_record_all():
     xs = [x for x in gen_record_all()]
     assert xs == [0, 1, 2]
 
-    gen_trace = mlflow.search_traces(experiment_ids=[experiment_id], return_type='list', filter_string="trace.name = 'gen_record_all'")[0]
+    gen_trace = mlflow.search_traces(
+        experiment_ids=[experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'gen_record_all'",
+    )[0]
     span = gen_trace.data.spans[0]
     tags = gen_trace.info.tags
 
     assert len(gen_trace.data.spans) == 1
     assert span.outputs == [0, 1, 2]
-    assert tags['domino.prog.metric.result'] == '1'
-    assert tags['domino.internal.is_eval'] == 'true'
+    assert tags["domino.prog.metric.result"] == "1"
+    assert tags["domino.internal.is_eval"] == "true"
 
 
 @pytest.mark.asyncio
-async def test_add_tracing_works_with_async(setup_mlflow_tracking_server, mlflow, tracing):
+async def test_add_tracing_works_with_async(
+    setup_mlflow_tracking_server, mlflow, tracing
+):
     exp = mlflow.set_experiment("test_add_tracing_works_with_async")
 
-    @tracing.add_tracing(name="async_function", evaluator=lambda span: {'result': 1})
+    @tracing.add_tracing(name="async_function", evaluator=lambda span: {"result": 1})
     async def async_function(x):
         return x
 
     res = await async_function(1)
     assert res == 1
 
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id], return_type="list"
+    )
 
-    assert [t.data.spans[0].inputs for t in traces] == [{'x': 1}], "Inputs to trace should be the function arguments"
-    assert [t.data.spans[0].outputs for t in traces] == [1], "Outputs to trace should be the function return value"
+    assert [t.data.spans[0].inputs for t in traces] == [
+        {"x": 1}
+    ], "Inputs to trace should be the function arguments"
+    assert [t.data.spans[0].outputs for t in traces] == [
+        1
+    ], "Outputs to trace should be the function return value"
 
 
 @pytest.mark.asyncio
-async def test_add_tracing_async_trace_in_evaluator(setup_mlflow_tracking_server, mlflow, tracing, logging):
+async def test_add_tracing_async_trace_in_evaluator(
+    setup_mlflow_tracking_server, mlflow, tracing, logging
+):
     """
     When using async functions, the trace should be accessible in the parent function's evaluator
     but not the child function's evaluator
     """
-    exp = mlflow.set_experiment("test_add_tracing_async_trace_in_evaluator")
+    mlflow.set_experiment("test_add_tracing_async_trace_in_evaluator")
 
-    @tracing.add_tracing(name="parent", evaluator=lambda span: {'span_exists_parent': 'True'}, trace_evaluator=lambda trace: {'trace_exists_parent': 'True'})
+    @tracing.add_tracing(
+        name="parent",
+        evaluator=lambda span: {"span_exists_parent": "True"},
+        trace_evaluator=lambda trace: {"trace_exists_parent": "True"},
+    )
     async def parent(x):
         return await child(x)
 
-    @tracing.add_tracing(name="child", evaluator=lambda span: {'span_exists_child': 'True'}, trace_evaluator=lambda trace: {'trace_exists_child': 'True'})
+    @tracing.add_tracing(
+        name="child",
+        evaluator=lambda span: {"span_exists_child": "True"},
+        trace_evaluator=lambda trace: {"trace_exists_child": "True"},
+    )
     async def child(x):
         return x
 
     with logging.DominoRun() as run:
         await parent(1)
 
-    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
-    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[0]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[
+        0
+    ]
+    parent_t = tracing.search_traces(run_id=run.info.run_id, trace_name="parent").data[
+        0
+    ]
     evals = {r.name: r.value for r in parent_t.evaluation_results}
-    assert evals.get('trace_exists_parent') == 'True'
-    assert 'trace_exists_child' not in evals
-    assert evals.get('span_exists_parent') == 'True'
-    assert evals.get('span_exists_child') == 'True'
+    assert evals.get("trace_exists_parent") == "True"
+    assert "trace_exists_child" not in evals
+    assert evals.get("span_exists_parent") == "True"
+    assert evals.get("span_exists_child") == "True"
 
 
 def test_search_traces(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
@@ -504,7 +659,9 @@ def test_search_traces(setup_mlflow_tracking_server, mocker, mlflow, tracing, lo
     def unit(x):
         return x
 
-    @tracing.add_tracing(name="parent", evaluator=lambda span: {'mymetric': 1, 'mylabel': 'category'})
+    @tracing.add_tracing(
+        name="parent", evaluator=lambda span: {"mymetric": 1, "mylabel": "category"}
+    )
     def parent(x, y):
         return unit(x) + unit(y)
 
@@ -520,18 +677,34 @@ def parent2(x):
         parent2(1)
 
     res = tracing.search_traces(run_id=run_id)
-    span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
+    span_data = [
+        (s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans
+    ]
 
     assert sorted([trace.name for trace in res.data]) == sorted(["parent", "parent2"])
-    assert sorted([(t.name, t.value) for trace in res.data for t in trace.evaluation_results if trace.name == "parent"]) \
-        == sorted([("mylabel", "category"), ("mymetric", 1.0)])
+    assert sorted(
+        [
+            (t.name, t.value)
+            for trace in res.data
+            for t in trace.evaluation_results
+            if trace.name == "parent"
+        ]
+    ) == sorted([("mylabel", "category"), ("mymetric", 1.0)])
     assert len(span_data) == 4
-    assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x': 1, 'y': 2}, 3),
-                                                            ("parent2", {'x': 1}, 1), ("unit_1", {'x': 1}, 1), ("unit_2", {'x': 2}, 2)
-                                                            ], key=lambda x: x[0])
+    assert sorted(span_data, key=lambda x: x[0]) == sorted(
+        [
+            ("parent", {"x": 1, "y": 2}, 3),
+            ("parent2", {"x": 1}, 1),
+            ("unit_1", {"x": 1}, 1),
+            ("unit_2", {"x": 2}, 2),
+        ],
+        key=lambda x: x[0],
+    )
 
 
-def test_search_traces_time_filter_warning(setup_mlflow_tracking_server, tracing, mlflow, logging, caplog):
+def test_search_traces_time_filter_warning(
+    setup_mlflow_tracking_server, tracing, mlflow, logging, caplog
+):
     """
     if start time is > end time, warn the user
     """
@@ -541,11 +714,17 @@ def test_search_traces_time_filter_warning(setup_mlflow_tracking_server, tracing
         run_id = run.info.run_id
 
     with caplog.at_level(logger.WARNING):
-        tracing.search_traces(run_id=run_id, start_time=datetime.now(), end_time=datetime.now() - timedelta(seconds=10))
+        tracing.search_traces(
+            run_id=run_id,
+            start_time=datetime.now(),
+            end_time=datetime.now() - timedelta(seconds=10),
+        )
         assert "start_time must be before end_time" in caplog.text
 
 
-def test_search_traces_by_trace_name(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_by_trace_name(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     @tracing.add_tracing(name="unit")
     def unit(x):
         return x
@@ -566,15 +745,25 @@ def parent2(x):
         parent2(1)
 
     res = tracing.search_traces(run_id=run_id, trace_name="parent")
-    span_data = [(s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans]
+    span_data = [
+        (s.name, s.inputs, s.outputs) for trace in res.data for s in trace.spans
+    ]
 
     assert [trace.name for trace in res.data] == ["parent"]
     assert len(span_data) == 3
-    assert sorted(span_data, key=lambda x: x[0]) == sorted([("parent", {'x': 1, 'y': 2}, 3),
-                                                            ("unit_1", {'x': 1}, 1), ("unit_2", {'x': 2}, 2)], key=lambda x: x[0])
+    assert sorted(span_data, key=lambda x: x[0]) == sorted(
+        [
+            ("parent", {"x": 1, "y": 2}, 3),
+            ("unit_1", {"x": 1}, 1),
+            ("unit_2", {"x": 2}, 2),
+        ],
+        key=lambda x: x[0],
+    )
 
 
-def test_search_traces_by_timestamp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_by_timestamp(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     @tracing.add_tracing(name="parent")
     def parent(x):
         return x
@@ -597,23 +786,26 @@ def parent(x):
     end_time = datetime.now() - timedelta(seconds=2)
 
     res = tracing.search_traces(
-            run_id=run_id,
-            trace_name="parent",
-            start_time=start_time,
-            end_time=end_time
+        run_id=run_id, trace_name="parent", start_time=start_time, end_time=end_time
     )
 
     assert [trace.name for trace in res.data] == ["parent"]
-    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res.data] == [[("parent", 2, 2)]]
+    assert [
+        [(s.name, s.inputs["x"], s.outputs) for s in trace.spans] for trace in res.data
+    ] == [[("parent", 2, 2)]]
 
 
-def test_search_traces_with_traces_made_2hrs_ago(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_with_traces_made_2hrs_ago(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     exp = mlflow.set_experiment("test_search_traces_with_traces_made_2hrs_ago")
 
     def parent(x):
         dt = datetime.now() - timedelta(hours=2)
         ns = int(dt.timestamp() * 1e9)
-        span = mlflow.start_span_no_context(name="parent", inputs=1, experiment_id=exp.experiment_id, start_time_ns=ns)
+        span = mlflow.start_span_no_context(
+            name="parent", inputs=1, experiment_id=exp.experiment_id, start_time_ns=ns
+        )
         span.end()
         return x
 
@@ -623,23 +815,25 @@ def parent(x):
         parent(1)
 
     res = tracing.search_traces(
-            run_id=run_id,
-            trace_name="parent",
+        run_id=run_id,
+        trace_name="parent",
     )
 
     assert [trace.name for trace in res.data] == ["parent"]
 
     # If i shorten the time filter, I get no results
     recent_res = tracing.search_traces(
-            run_id=run_id,
-            trace_name="parent",
-            start_time=datetime.now() - timedelta(hours=1),
+        run_id=run_id,
+        trace_name="parent",
+        start_time=datetime.now() - timedelta(hours=1),
     )
     assert recent_res.data == []
 
 
-def test_search_traces_multiple_runs_in_exp(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
-    exp = mlflow.set_experiment("test_search_traces_multiple_runs_in_exp")
+def test_search_traces_multiple_runs_in_exp(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
+    mlflow.set_experiment("test_search_traces_multiple_runs_in_exp")
 
     @tracing.add_tracing(name="unit1")
     def unit1(x):
@@ -662,7 +856,9 @@ def unit2(x):
     assert [trace.name for trace in res.data] == ["unit1"]
 
 
-def test_search_traces_agent(setup_mlflow_tracking_server_no_env_var_mock, mlflow, tracing):
+def test_search_traces_agent(
+    setup_mlflow_tracking_server_no_env_var_mock, mlflow, tracing
+):
     """
     Can filter by agent id alone or id and version
     """
@@ -677,16 +873,29 @@ def get_trace_names(traces):
         return sorted([trace.name for trace in traces.data])
 
     all_traces = tracing.search_agent_traces(agent_id=app_id)
-    assert get_trace_names(all_traces) == ["one", "two"], "Can get traces for all agent versions"
+    assert get_trace_names(all_traces) == [
+        "one",
+        "two",
+    ], "Can get traces for all agent versions"
 
-    v1_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_1)
-    assert get_trace_names(v1_traces) == ["one"], "Can get traces for just agent version 1"
+    v1_traces = tracing.search_agent_traces(
+        agent_id=app_id, agent_version=app_version_1
+    )
+    assert get_trace_names(v1_traces) == [
+        "one"
+    ], "Can get traces for just agent version 1"
 
-    v2_traces = tracing.search_agent_traces(agent_id=app_id, agent_version=app_version_2)
-    assert get_trace_names(v2_traces) == ["two"], "Can get traces for just agent version 2"
+    v2_traces = tracing.search_agent_traces(
+        agent_id=app_id, agent_version=app_version_2
+    )
+    assert get_trace_names(v2_traces) == [
+        "two"
+    ], "Can get traces for just agent version 2"
 
 
-def test_search_traces_agent_agent_id_required(setup_mlflow_tracking_server_no_env_var_mock):
+def test_search_traces_agent_agent_id_required(
+    setup_mlflow_tracking_server_no_env_var_mock,
+):
     """
     agent id is required if version supplied
     """
@@ -694,10 +903,14 @@ def test_search_traces_agent_agent_id_required(setup_mlflow_tracking_server_no_e
     with pytest.raises(Exception) as e_info:
         _search_traces(agent_version="fakeversion")
 
-    assert "agent_id must also be provided" in str(e_info), "Should raise if version provided without id"
+    assert "agent_id must also be provided" in str(
+        e_info
+    ), "Should raise if version provided without id"
 
 
-def test_search_traces_no_run_agent_ids_supplied(setup_mlflow_tracking_server_no_env_var_mock, tracing):
+def test_search_traces_no_run_agent_ids_supplied(
+    setup_mlflow_tracking_server_no_env_var_mock, tracing
+):
     """
     should throw if no run id, agent version, or id supplied
     """
@@ -705,11 +918,15 @@ def test_search_traces_no_run_agent_ids_supplied(setup_mlflow_tracking_server_no
     with pytest.raises(Exception) as e_info:
         _search_traces()
 
-    assert "Either run_id or agent_id and agent_version must be provided to search traces" in str(e_info), \
-        "Should raise no agent info or run info provided"
+    assert (
+        "Either run_id or agent_id and agent_version must be provided to search traces"
+        in str(e_info)
+    ), "Should raise no agent info or run info provided"
 
 
-def test_search_traces_filters_should_work_together_dev(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_filters_should_work_together_dev(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     """
     When every filter is specified as well as pagination, the expected results should be returned
     The test creates multiple differently named traces over the course of a few hours in an experiment
@@ -724,7 +941,9 @@ def unit1(x):
     def create_span_at_time(name: str, inputs: int, hours_ago: int):
         dt = datetime.now() - timedelta(hours=hours_ago)
         ns = int(dt.timestamp() * 1e9)
-        span = mlflow.start_span_no_context(name=name, inputs=inputs, experiment_id=exp.experiment_id, start_time_ns=ns)
+        span = mlflow.start_span_no_context(
+            name=name, inputs=inputs, experiment_id=exp.experiment_id, start_time_ns=ns
+        )
         span.end()
 
     @tracing.add_tracing(name="sum1")
@@ -755,12 +974,12 @@ def unit2(x):
 
     def get_traces(next_page_token):
         return tracing.search_traces(
-                run_id=run_1_id,
-                trace_name="sum1",
-                start_time=start_time,
-                end_time=end_time,
-                page_token=next_page_token,
-                max_results=1
+            run_id=run_1_id,
+            trace_name="sum1",
+            start_time=start_time,
+            end_time=end_time,
+            page_token=next_page_token,
+            max_results=1,
         )
 
     def get_span_data(page):
@@ -775,7 +994,9 @@ def get_span_data(page):
     assert get_span_data(page2) == [("sum1", [3])], "Should return second call"
 
 
-def test_search_traces_filters_should_work_together_prod(setup_mlflow_tracking_server_no_env_var_mock, mocker, mlflow, tracing, logging):
+def test_search_traces_filters_should_work_together_prod(
+    setup_mlflow_tracking_server_no_env_var_mock, mocker, mlflow, tracing, logging
+):
     """
     When searching by agent ID and version and when every filter is specified as well as pagination,
     the expected results should be returned
@@ -800,13 +1021,13 @@ def test_search_traces_filters_should_work_together_prod(setup_mlflow_tracking_s
 
     def get_traces(next_page_token):
         return tracing.search_agent_traces(
-                agent_id=app_id,
-                agent_version=app_version_1,
-                trace_name="sum1",
-                start_time=start_time,
-                end_time=end_time,
-                page_token=next_page_token,
-                max_results=1
+            agent_id=app_id,
+            agent_version=app_version_1,
+            trace_name="sum1",
+            start_time=start_time,
+            end_time=end_time,
+            page_token=next_page_token,
+            max_results=1,
         )
 
     def get_span_data(page):
@@ -821,11 +1042,14 @@ def get_span_data(page):
     assert get_span_data(page2) == [("sum1", [2])], "Should return second call"
 
 
-def test_search_traces_pagination(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_pagination(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     """
     The api should provide a page token in if the total number of results is bigger than the max results
     and you can use that token to get the next page of results
     """
+
     @tracing.add_tracing(name="parent")
     def parent(x):
         return x
@@ -838,22 +1062,26 @@ def parent(x):
         parent(2)
 
     res1 = tracing.search_traces(
-            run_id=run_id,
-            max_results=1,
+        run_id=run_id,
+        max_results=1,
     )
 
-    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res1.data] == [[("parent", 1, 1)]]
+    assert [
+        [(s.name, s.inputs["x"], s.outputs) for s in trace.spans] for trace in res1.data
+    ] == [[("parent", 1, 1)]]
 
     res2 = tracing.search_traces(
-            run_id=run_id,
-            max_results=1,
-            page_token=res1.page_token
+        run_id=run_id, max_results=1, page_token=res1.page_token
     )
 
-    assert [[(s.name, s.inputs['x'], s.outputs) for s in trace.spans] for trace in res2.data] == [[("parent", 2, 2)]]
+    assert [
+        [(s.name, s.inputs["x"], s.outputs) for s in trace.spans] for trace in res2.data
+    ] == [[("parent", 2, 2)]]
 
 
-def test_search_traces_from_lazy_generator(setup_mlflow_tracking_server, mocker, mlflow, tracing, logging):
+def test_search_traces_from_lazy_generator(
+    setup_mlflow_tracking_server, mocker, mlflow, tracing, logging
+):
     @tracing.add_tracing(name="parent", eagerly_evaluate_streamed_results=False)
     def parent():
         for i in range(3):
@@ -867,21 +1095,26 @@ def parent():
         [x for x in parent()]
 
     traces = tracing.search_traces(
-            run_id=run_id,
+        run_id=run_id,
     )
 
     assert len(traces.data) == 1
     assert len(traces.data[0].spans) == 4
 
 
-def test_init_tracing_triggers_one_get_experiment_by_name_calls_in_threads(setup_mlflow_tracking_server, mlflow, tracing):
+def test_init_tracing_triggers_one_get_experiment_by_name_calls_in_threads(
+    setup_mlflow_tracking_server, mlflow, tracing
+):
     """
     init_tracing should call mlflow.set_experiment once
     when invoked concurrently from two threads and traces should go to the
     right experiment anyway
     """
     app_id = "concurrency_app"
-    env_vars = TEST_AGENTS_ENV_VARS | {"DOMINO_AGENT_IS_PROD": "true", "DOMINO_APP_ID": app_id}
+    env_vars = TEST_AGENTS_ENV_VARS | {
+        "DOMINO_AGENT_IS_PROD": "true",
+        "DOMINO_APP_ID": app_id,
+    }
     expected_experiment_name = build_agent_experiment_name(app_id)
 
     reset_prod_tracing()
@@ -899,9 +1132,9 @@ def do():
 
         # Spy on mlflow.set_experiment to ensure it is called once
         with patch.object(
-                mlflow,
-                "set_experiment",
-                wraps=mlflow.set_experiment,
+            mlflow,
+            "set_experiment",
+            wraps=mlflow.set_experiment,
         ) as spy_set_experiment:
             t1 = threading.Thread(target=send_traces)
             t2 = threading.Thread(target=send_traces)
@@ -911,22 +1144,28 @@ def do():
             t1.join()
             t2.join()
 
-            assert spy_set_experiment.call_count == 1, "set_experiment should be called once from init_tracing"
+            assert (
+                spy_set_experiment.call_count == 1
+            ), "set_experiment should be called once from init_tracing"
 
         # Verify two traces named "do" were saved to the Agent experiment
         exp = mlflow.get_experiment_by_name(expected_experiment_name)
         traces = mlflow.search_traces(
-                experiment_ids=[exp.experiment_id],
-                filter_string="trace.name = 'do'",
-                return_type='list',
+            experiment_ids=[exp.experiment_id],
+            filter_string="trace.name = 'do'",
+            return_type="list",
         )
 
         # even though we don't re-initialize the experiment in both threads, the traces
         # still go to the right experiment
-        assert len(traces) == 2, "Two traces named 'do' should be saved to the experiment"
+        assert (
+            len(traces) == 2
+        ), "Two traces named 'do' should be saved to the experiment"
 
 
-def test_add_tracing_span_type_and_attributes(setup_mlflow_tracking_server, mlflow, tracing):
+def test_add_tracing_span_type_and_attributes(
+    setup_mlflow_tracking_server, mlflow, tracing
+):
     """
     add_tracing should support span_type and attributes parameters
     """
@@ -935,9 +1174,7 @@ def test_add_tracing_span_type_and_attributes(setup_mlflow_tracking_server, mlfl
     exp = mlflow.set_experiment("test_add_tracing_span_type_and_attributes")
 
     @tracing.add_tracing(
-        name="llm_call",
-        span_type=SpanType.LLM,
-        attributes={"model": "gpt-4"}
+        name="llm_call", span_type=SpanType.LLM, attributes={"model": "gpt-4"}
     )
     def llm_call(prompt):
         return f"Response to: {prompt}"
@@ -946,15 +1183,21 @@ def llm_call(prompt):
     result = llm_call("Hello")
     assert result == "Response to: Hello"
 
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id], return_type="list"
+    )
     assert len(traces) == 1, "Should create one trace"
 
     span = traces[0].data.spans[0]
     assert span.span_type == "LLM", "Span type should be set to LLM"
-    assert span.attributes.get("model") == "gpt-4", "Custom attribute 'model' should be set on the span"
+    assert (
+        span.attributes.get("model") == "gpt-4"
+    ), "Custom attribute 'model' should be set on the span"
 
 
-def test_add_tracing_span_type_with_async_and_generator(setup_mlflow_tracking_server, mlflow, tracing):
+def test_add_tracing_span_type_with_async_and_generator(
+    setup_mlflow_tracking_server, mlflow, tracing
+):
     """
     span_type and attributes should work with async and generator functions
     """
@@ -965,15 +1208,12 @@ def test_add_tracing_span_type_with_async_and_generator(setup_mlflow_tracking_se
     @tracing.add_tracing(
         name="async_retriever",
         span_type=SpanType.RETRIEVER,
-        attributes={"index": "vector_db"}
+        attributes={"index": "vector_db"},
     )
     async def async_retriever(query):
         return [f"doc_{query}"]
 
-    @tracing.add_tracing(
-        name="generator_chain",
-        span_type=SpanType.CHAIN
-    )
+    @tracing.add_tracing(name="generator_chain", span_type=SpanType.CHAIN)
     def generator_chain():
         for i in range(2):
             yield f"chunk_{i}"
@@ -986,20 +1226,34 @@ def generator_chain():
     assert gen_results == ["chunk_0", "chunk_1"]
 
     # Test traces were created
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id], return_type="list"
+    )
     assert len(traces) >= 2, "Should create at least two traces"
 
-    async_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'async_retriever'")[0]
+    async_trace = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'async_retriever'",
+    )[0]
     async_span = async_trace.data.spans[0]
     assert async_span.span_type == "RETRIEVER", "Async span type should be RETRIEVER"
-    assert async_span.attributes.get("index") == "vector_db", "Async span attribute 'index' should be set"
-
-    gen_trace = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list', filter_string="trace.name = 'generator_chain'")[0]
+    assert (
+        async_span.attributes.get("index") == "vector_db"
+    ), "Async span attribute 'index' should be set"
+
+    gen_trace = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id],
+        return_type="list",
+        filter_string="trace.name = 'generator_chain'",
+    )[0]
     gen_span = gen_trace.data.spans[0]
     assert gen_span.span_type == "CHAIN", "Generator span type should be CHAIN"
 
 
-def test_add_tracing_custom_span_type_string(setup_mlflow_tracking_server, mlflow, tracing):
+def test_add_tracing_custom_span_type_string(
+    setup_mlflow_tracking_server, mlflow, tracing
+):
     """
     add_tracing should accept custom span type strings
     """
@@ -1008,7 +1262,7 @@ def test_add_tracing_custom_span_type_string(setup_mlflow_tracking_server, mlflo
     @tracing.add_tracing(
         name="custom_operation",
         span_type="CUSTOM_OPERATION",
-        attributes={"operation_id": "op_123"}
+        attributes={"operation_id": "op_123"},
     )
     def custom_operation():
         return "custom result"
@@ -1017,9 +1271,15 @@ def custom_operation():
     result = custom_operation()
     assert result == "custom result"
 
-    traces = mlflow.search_traces(experiment_ids=[exp.experiment_id], return_type='list')
+    traces = mlflow.search_traces(
+        experiment_ids=[exp.experiment_id], return_type="list"
+    )
     assert len(traces) == 1, "Should create one trace"
 
     span = traces[0].data.spans[0]
-    assert span.span_type == "CUSTOM_OPERATION", "Custom span type string should be preserved"
-    assert span.attributes.get("operation_id") == "op_123", "Custom attribute 'operation_id' should be set on the span"
+    assert (
+        span.span_type == "CUSTOM_OPERATION"
+    ), "Custom span type string should be preserved"
+    assert (
+        span.attributes.get("operation_id") == "op_123"
+    ), "Custom attribute 'operation_id' should be set on the span"
diff --git a/tests/test_app.py b/tests/test_app.py
index 7b84656d..f71af1a5 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -47,7 +47,8 @@ def test_app_publish_with_branch(requests_mock, dummy_hostname):
     d.app_publish(app_id=MOCK_APP_ID, branch="my-feature-branch")
 
     app_start_request = next(
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/start"
     )
     assert app_start_request.json()["mainRepoGitRef"] == {
@@ -66,7 +67,8 @@ def test_app_publish_with_commit_id(requests_mock, dummy_hostname):
     d.app_publish(app_id=MOCK_APP_ID, commit_id="abc123def456")
 
     app_start_request = next(
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/start"
     )
     assert app_start_request.json()["mainRepoGitRef"] == {
@@ -85,7 +87,8 @@ def test_app_publish_omits_git_ref_when_not_provided(requests_mock, dummy_hostna
     d.app_publish(app_id=MOCK_APP_ID)
 
     app_start_request = next(
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/start"
     )
     assert "mainRepoGitRef" not in app_start_request.json()
@@ -111,7 +114,8 @@ def test_app_publish_unpublishes_running_app(requests_mock, dummy_hostname):
     d.app_publish(app_id=MOCK_APP_ID, unpublish_running_apps=True)
 
     stop_requests = [
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/stop"
     ]
     assert len(stop_requests) == 1
@@ -126,7 +130,8 @@ def test_app_publish_skips_unpublish_when_disabled(requests_mock, dummy_hostname
     d.app_publish(app_id=MOCK_APP_ID, unpublish_running_apps=False)
 
     stop_requests = [
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/stop"
     ]
     assert len(stop_requests) == 0
@@ -142,7 +147,8 @@ def test_app_publish_targets_specific_app_id(requests_mock, dummy_hostname):
     d.app_publish(app_id=MOCK_APP_ID)
 
     start_requests = [
-        req for req in requests_mock.request_history
+        req
+        for req in requests_mock.request_history
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/start"
     ]
     assert len(start_requests) == 1
diff --git a/tests/test_apps.py b/tests/test_apps.py
index 2f1b6c10..67ce9088 100644
--- a/tests/test_apps.py
+++ b/tests/test_apps.py
@@ -2,6 +2,7 @@
 Unit tests for app publish/unpublish methods.
 All tests use requests_mock — no live Domino deployment required.
 """
+
 import pytest
 
 from domino import Domino
@@ -56,9 +57,7 @@ def test_app_publish_creates_app_when_none_exists(requests_mock, dummy_hostname)
         f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}", json=[]
     )
     # __app_create
-    requests_mock.post(
-        f"{dummy_hostname}/v4/modelProducts", json={"id": MOCK_APP_ID}
-    )
+    requests_mock.post(f"{dummy_hostname}/v4/modelProducts", json={"id": MOCK_APP_ID})
     # app_start
     requests_mock.post(
         f"{dummy_hostname}/v4/modelProducts/{MOCK_APP_ID}/start",
@@ -162,7 +161,9 @@ def test_app_unpublish_does_nothing_when_no_app_exists(requests_mock, dummy_host
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_app_unpublish_does_nothing_when_app_already_stopped(requests_mock, dummy_hostname):
+def test_app_unpublish_does_nothing_when_app_already_stopped(
+    requests_mock, dummy_hostname
+):
     requests_mock.get(
         f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}",
         json=[MOCK_APP],
diff --git a/tests/test_basic_auth.py b/tests/test_basic_auth.py
index ca932e14..82814de6 100644
--- a/tests/test_basic_auth.py
+++ b/tests/test_basic_auth.py
@@ -90,7 +90,9 @@ def test_object_creation_with_api_key():
     ), "Authentication using API key should be of type domino.authentication.ApiKeyAuth"
 
 
-@pytest.mark.usefixtures("mock_domino_version_response", "clear_token_file_from_env", "mock_proxy_response")
+@pytest.mark.usefixtures(
+    "mock_domino_version_response", "clear_token_file_from_env", "mock_proxy_response"
+)
 def test_object_creation_with_api_proxy():
     """
     Confirm that the expected auth type is used when using api proxy.
@@ -98,14 +100,20 @@ def test_object_creation_with_api_proxy():
     dummy_host = "http://domino.somefakecompany.com"
     dummy_api_proxy = "localhost:1234"
 
-    d = Domino(host=dummy_host, project="anyuser/quick-start", api_proxy=dummy_api_proxy)
+    d = Domino(
+        host=dummy_host, project="anyuser/quick-start", api_proxy=dummy_api_proxy
+    )
     assert isinstance(
         d.request_manager.auth, domino.authentication.ProxyAuth
     ), "Authentication using API proxy should be of type domino.authentication.ProxyAuth"
     assert d.request_manager.auth.api_proxy == "http://localhost:1234"
 
 
-@pytest.mark.usefixtures("mock_domino_version_response", "clear_token_file_from_env", "mock_proxy_response_https")
+@pytest.mark.usefixtures(
+    "mock_domino_version_response",
+    "clear_token_file_from_env",
+    "mock_proxy_response_https",
+)
 def test_object_creation_with_api_proxy_with_scheme():
     """
     Confirm that the expected auth type is used when using api proxy.
@@ -113,7 +121,9 @@ def test_object_creation_with_api_proxy_with_scheme():
     dummy_host = "http://domino.somefakecompany.com"
     dummy_api_proxy = "https://localhost:1234"
 
-    d = Domino(host=dummy_host, project="anyuser/quick-start", api_proxy=dummy_api_proxy)
+    d = Domino(
+        host=dummy_host, project="anyuser/quick-start", api_proxy=dummy_api_proxy
+    )
     assert isinstance(
         d.request_manager.auth, domino.authentication.ProxyAuth
     ), "Authentication using API proxy should be of type domino.authentication.ProxyAuth"
diff --git a/tests/test_collaborators.py b/tests/test_collaborators.py
index 68dabdd8..66ce9587 100644
--- a/tests/test_collaborators.py
+++ b/tests/test_collaborators.py
@@ -2,10 +2,10 @@
 Unit tests for collaborator API methods.
 All tests use requests_mock — no live Domino deployment required.
 """
+
 import pytest
 
-from domino import Domino
-from domino import exceptions
+from domino import Domino, exceptions
 
 MOCK_PROJECT_ID = "aabbccddeeff001122334455"
 MOCK_USER_ID = "aabbccddeeff001122334460"
diff --git a/tests/test_custom_metrics.py b/tests/test_custom_metrics.py
index 27ba3369..43bdcf6e 100644
--- a/tests/test_custom_metrics.py
+++ b/tests/test_custom_metrics.py
@@ -5,6 +5,7 @@
 integration-level setup. Our Bug 6 fix lives in _CustomMetricsClient.
 All tests use requests_mock — no live Domino deployment required.
 """
+
 import pytest
 
 from domino import Domino
@@ -33,7 +34,9 @@ def hand_rolled_client(requests_mock, dummy_hostname, base_mocks):
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_trigger_alert_payload_without_condition(requests_mock, dummy_hostname, hand_rolled_client):
+def test_trigger_alert_payload_without_condition(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     alert_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricAlerts/v1", status_code=200
     )
@@ -50,7 +53,9 @@ def test_trigger_alert_payload_without_condition(requests_mock, dummy_hostname,
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_trigger_alert_payload_with_condition(requests_mock, dummy_hostname, hand_rolled_client):
+def test_trigger_alert_payload_with_condition(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     alert_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricAlerts/v1", status_code=200
     )
@@ -69,7 +74,9 @@ def test_trigger_alert_payload_with_condition(requests_mock, dummy_hostname, han
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_trigger_alert_payload_with_condition_no_limits(requests_mock, dummy_hostname, hand_rolled_client):
+def test_trigger_alert_payload_with_condition_no_limits(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     alert_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricAlerts/v1", status_code=200
     )
@@ -86,7 +93,9 @@ def test_trigger_alert_payload_with_condition_no_limits(requests_mock, dummy_hos
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_trigger_alert_includes_description_when_provided(requests_mock, dummy_hostname, hand_rolled_client):
+def test_trigger_alert_includes_description_when_provided(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     alert_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricAlerts/v1", status_code=200
     )
@@ -101,7 +110,9 @@ def test_trigger_alert_includes_description_when_provided(requests_mock, dummy_h
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_trigger_alert_omits_description_when_not_provided(requests_mock, dummy_hostname, hand_rolled_client):
+def test_trigger_alert_omits_description_when_not_provided(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     alert_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricAlerts/v1", status_code=200
     )
@@ -115,7 +126,9 @@ def test_trigger_alert_omits_description_when_not_provided(requests_mock, dummy_
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_log_metric_sends_correct_payload(requests_mock, dummy_hostname, hand_rolled_client):
+def test_log_metric_sends_correct_payload(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     log_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricValues/v1", status_code=200
     )
@@ -134,7 +147,9 @@ def test_log_metric_sends_correct_payload(requests_mock, dummy_hostname, hand_ro
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_log_metric_includes_tags_when_provided(requests_mock, dummy_hostname, hand_rolled_client):
+def test_log_metric_includes_tags_when_provided(
+    requests_mock, dummy_hostname, hand_rolled_client
+):
     log_mock = requests_mock.post(
         f"{dummy_hostname}/api/metricValues/v1", status_code=200
     )
diff --git a/tests/test_datasets.py b/tests/test_datasets.py
index 6f549999..ed207c64 100644
--- a/tests/test_datasets.py
+++ b/tests/test_datasets.py
@@ -3,14 +3,14 @@
 Unit tests at top (no live Domino deployment required).
 Integration tests below (skipped unless a live deployment is reachable).
 """
+
 import os
 import random
+from unittest.mock import patch
 
 import pytest
-from unittest.mock import patch
 
-from domino import Domino
-from domino import exceptions
+from domino import Domino, exceptions
 from domino.helpers import domino_is_reachable
 
 MOCK_PROJECT_ID = "aabbccddeeff001122334455"
@@ -46,6 +46,7 @@ def base_mocks(requests_mock, dummy_hostname):
 # Unit tests (no live Domino deployment required)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_datasets_list_returns_list(requests_mock, dummy_hostname):
     requests_mock.get(
@@ -103,9 +104,7 @@ def test_datasets_details_returns_dataset(requests_mock, dummy_hostname):
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_datasets_create_returns_new_dataset(requests_mock, dummy_hostname):
-    requests_mock.get(
-        f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[]
-    )
+    requests_mock.get(f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[])
     requests_mock.post(f"{dummy_hostname}/dataset", json=MOCK_DATASET_1)
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     result = d.datasets_create("dataset-one", "First dataset")
@@ -125,9 +124,7 @@ def test_datasets_create_raises_when_name_already_exists(requests_mock, dummy_ho
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_datasets_create_sends_correct_payload(requests_mock, dummy_hostname):
-    requests_mock.get(
-        f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[]
-    )
+    requests_mock.get(f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[])
     requests_mock.post(f"{dummy_hostname}/dataset", json=MOCK_DATASET_1)
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     d.datasets_create("dataset-one", "First dataset")
@@ -143,12 +140,16 @@ def test_datasets_update_details_returns_updated_dataset(requests_mock, dummy_ho
     requests_mock.patch(f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_1}", json={})
     requests_mock.get(f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_1}", json=updated)
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    result = d.datasets_update_details(MOCK_DATASET_ID_1, dataset_description="Updated description")
+    result = d.datasets_update_details(
+        MOCK_DATASET_ID_1, dataset_description="Updated description"
+    )
     assert result["datasetDescription"] == "Updated description"
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_datasets_update_details_raises_when_new_name_already_exists(requests_mock, dummy_hostname):
+def test_datasets_update_details_raises_when_new_name_already_exists(
+    requests_mock, dummy_hostname
+):
     requests_mock.get(
         f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}",
         json=[MOCK_DATASET_1, MOCK_DATASET_2],
@@ -160,9 +161,7 @@ def test_datasets_update_details_raises_when_new_name_already_exists(requests_mo
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_dataset_remove_raises_when_dataset_missing(requests_mock, dummy_hostname):
-    requests_mock.get(
-        f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[]
-    )
+    requests_mock.get(f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}", json=[])
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     with pytest.raises(exceptions.DatasetNotFoundException):
         d._dataset_remove("nonexistent-id")
@@ -188,8 +187,12 @@ def test_datasets_remove_delegates_to_dataset_remove(requests_mock, dummy_hostna
         f"{dummy_hostname}/dataset?projectId={MOCK_PROJECT_ID}",
         json=[MOCK_DATASET_1, MOCK_DATASET_2],
     )
-    requests_mock.delete(f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_1}", status_code=204)
-    requests_mock.delete(f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_2}", status_code=204)
+    requests_mock.delete(
+        f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_1}", status_code=204
+    )
+    requests_mock.delete(
+        f"{dummy_hostname}/dataset/{MOCK_DATASET_ID_2}", status_code=204
+    )
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     responses = d.datasets_remove([MOCK_DATASET_ID_1, MOCK_DATASET_ID_2])
     assert len(responses) == 2
@@ -211,6 +214,7 @@ def test_datasets_remove_raises_for_missing_id(requests_mock, dummy_hostname):
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 @pytest.fixture
 def random_seq():
     rand_val = random.randint(1000, 8888)
@@ -308,7 +312,9 @@ def test_datasets_upload(default_domino_client):
     ]
     assert "test_datasets.py" in os.listdir("tests")
     local_path_to_file = "tests/test_datasets.py"
-    response = default_domino_client.datasets_upload_files(datasets_id, local_path_to_file)
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, local_path_to_file
+    )
 
     assert "test_datasets.py" in response
 
@@ -322,15 +328,16 @@ def test_datasets_upload_with_sub_dir(default_domino_client):
     ]
     assert "test_datasets.py" in os.listdir("tests")
     local_path_to_file = "tests/test_datasets.py"
-    response = default_domino_client.datasets_upload_files(datasets_id, local_path_to_file,
-                                                           target_relative_path="sub_d")
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, local_path_to_file, target_relative_path="sub_d"
+    )
     assert "test_datasets.py" in response
 
 
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
-@patch('os.path.exists')
+@patch("os.path.exists")
 def test_datasets_upload_mixed_slash_path(mock_exists, default_domino_client):
     mock_exists.return_value = True
     datasets_id = default_domino_client.datasets_ids(default_domino_client.project_id)[
@@ -338,15 +345,16 @@ def test_datasets_upload_mixed_slash_path(mock_exists, default_domino_client):
     ]
     assert "back\\slash.txt" in os.listdir("tests/assets")
     local_path_to_file = "tests/assets/back\\slash.txt"
-    response = default_domino_client.datasets_upload_files(datasets_id,
-                                                           local_path_to_file)
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, local_path_to_file
+    )
     assert "back\\slash.txt" in response
 
 
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
-@patch('os.path.exists')
+@patch("os.path.exists")
 def test_datasets_upload_windows_path(mock_exists, default_domino_client):
     mock_exists.return_value = True
     datasets_id = default_domino_client.datasets_ids(default_domino_client.project_id)[
@@ -354,15 +362,16 @@ def test_datasets_upload_windows_path(mock_exists, default_domino_client):
     ]
     assert "test_datasets.py" in os.listdir("tests")
     windows_local_path_to_file = "tests\\test_datasets.py"
-    response = default_domino_client.datasets_upload_files(datasets_id,
-                                                           windows_local_path_to_file)
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, windows_local_path_to_file
+    )
     assert "test_datasets.py" in response
 
 
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
-@patch('os.path.exists')
+@patch("os.path.exists")
 def test_datasets_upload_with_sub_dir_windows_path(mock_exists, default_domino_client):
     mock_exists.return_value = True
     datasets_id = default_domino_client.datasets_ids(default_domino_client.project_id)[
@@ -370,9 +379,9 @@ def test_datasets_upload_with_sub_dir_windows_path(mock_exists, default_domino_c
     ]
     assert "test_datasets.py" in os.listdir("tests")
     windows_local_path_to_file = "tests\\test_datasets.py"
-    response = default_domino_client.datasets_upload_files(datasets_id,
-                                                           windows_local_path_to_file,
-                                                           target_relative_path="sub_d")
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, windows_local_path_to_file, target_relative_path="sub_d"
+    )
 
     assert "test_datasets.py" in response
 
@@ -380,7 +389,7 @@ def test_datasets_upload_with_sub_dir_windows_path(mock_exists, default_domino_c
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
-@patch('os.path.exists')
+@patch("os.path.exists")
 def test_datasets_upload_directory_windows_path(mock_exists, default_domino_client):
     mock_exists.return_value = True
     datasets_id = default_domino_client.datasets_ids(default_domino_client.project_id)[
@@ -388,8 +397,9 @@ def test_datasets_upload_directory_windows_path(mock_exists, default_domino_clie
     ]
     assert os.path.isdir("tests/assets")
     windows_local_path_to_dir = "tests/assets"
-    response = default_domino_client.datasets_upload_files(datasets_id,
-                                                           windows_local_path_to_dir)
+    response = default_domino_client.datasets_upload_files(
+        datasets_id, windows_local_path_to_dir
+    )
     assert "tests/assets" in response
 
 
diff --git a/tests/test_deprecations.py b/tests/test_deprecations.py
index 4da9cd73..e6306a4c 100644
--- a/tests/test_deprecations.py
+++ b/tests/test_deprecations.py
@@ -2,6 +2,7 @@
 Tests confirming that renamed camelCase parameters emit DeprecationWarning
 and that the call still succeeds with the old name.
 """
+
 import pytest
 
 from domino import Domino
@@ -22,6 +23,7 @@ def client(requests_mock, dummy_hostname):
 # runs_start
 # ---------------------------------------------------------------------------
 
+
 class TestRunsStartDeprecations:
     @pytest.fixture(autouse=True)
     def mock_runs_start(self, requests_mock, dummy_hostname):
@@ -48,9 +50,13 @@ def test_publishApiEndpoint_warns(self, client):
     @pytest.mark.usefixtures("clear_token_file_from_env")
     def test_new_names_accepted_without_warning(self, client, recwarn):
         client.runs_start("main.py", is_direct=True, commit_id=MOCK_COMMIT_ID)
-        deprecation_warnings = [w for w in recwarn.list if issubclass(w.category, DeprecationWarning)
-                                and "deprecated" in str(w.message).lower()
-                                and any(x in str(w.message) for x in ["isDirect", "commitId"])]
+        deprecation_warnings = [
+            w
+            for w in recwarn.list
+            if issubclass(w.category, DeprecationWarning)
+            and "deprecated" in str(w.message).lower()
+            and any(x in str(w.message) for x in ["isDirect", "commitId"])
+        ]
         assert len(deprecation_warnings) == 0
 
 
@@ -58,16 +64,29 @@ def test_new_names_accepted_without_warning(self, client, recwarn):
 # runs_start_blocking (spot-check one param — same shim logic)
 # ---------------------------------------------------------------------------
 
+
 class TestRunsStartBlockingDeprecations:
     @pytest.fixture(autouse=True)
     def mock_endpoints(self, requests_mock, dummy_hostname):
         requests_mock.post(
             f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs",
-            json={"runId": MOCK_RUN_ID, "outputCommitId": MOCK_COMMIT_ID, "status": "Succeeded"},
+            json={
+                "runId": MOCK_RUN_ID,
+                "outputCommitId": MOCK_COMMIT_ID,
+                "status": "Succeeded",
+            },
         )
         requests_mock.get(
             f"{dummy_hostname}/v1/projects/anyuser/anyproject/runs",
-            json={"data": [{"id": MOCK_RUN_ID, "outputCommitId": MOCK_COMMIT_ID, "status": "Succeeded"}]},
+            json={
+                "data": [
+                    {
+                        "id": MOCK_RUN_ID,
+                        "outputCommitId": MOCK_COMMIT_ID,
+                        "status": "Succeeded",
+                    }
+                ]
+            },
         )
         requests_mock.get(
             f"{dummy_hostname}/v1/projects/anyuser/anyproject/run/{MOCK_RUN_ID}/stdout",
@@ -77,13 +96,16 @@ def mock_endpoints(self, requests_mock, dummy_hostname):
     @pytest.mark.usefixtures("clear_token_file_from_env")
     def test_isDirect_warns(self, client):
         with pytest.warns(DeprecationWarning, match="isDirect is deprecated"):
-            client.runs_start_blocking("main.py", isDirect=True, poll_freq=1, max_poll_time=5)
+            client.runs_start_blocking(
+                "main.py", isDirect=True, poll_freq=1, max_poll_time=5
+            )
 
 
 # ---------------------------------------------------------------------------
 # run_stop / runs_status / get_run_log / runs_stdout
 # ---------------------------------------------------------------------------
 
+
 class TestLegacyRunMethodDeprecations:
     @pytest.fixture(autouse=True)
     def mock_run_endpoints(self, requests_mock, dummy_hostname):
@@ -137,6 +159,7 @@ def test_runs_stdout_runId_warns(self, client):
 # files_list
 # ---------------------------------------------------------------------------
 
+
 class TestFilesListDeprecations:
     @pytest.fixture(autouse=True)
     def mock_files_list(self, requests_mock, dummy_hostname):
@@ -155,6 +178,7 @@ def test_commitId_warns(self, client):
 # endpoint_publish
 # ---------------------------------------------------------------------------
 
+
 class TestEndpointPublishDeprecations:
     @pytest.fixture(autouse=True)
     def mock_endpoint_publish(self, requests_mock, dummy_hostname):
@@ -173,6 +197,7 @@ def test_commitId_warns(self, client):
 # app_publish / app_unpublish
 # ---------------------------------------------------------------------------
 
+
 class TestAppDeprecations:
     @pytest.fixture(autouse=True)
     def mock_app_endpoints(self, requests_mock, dummy_hostname):
@@ -196,7 +221,9 @@ def test_app_publish_appId_warns(self, client):
 
     @pytest.mark.usefixtures("clear_token_file_from_env")
     def test_app_publish_unpublishRunningApps_warns(self, client):
-        with pytest.warns(DeprecationWarning, match="unpublishRunningApps is deprecated"):
+        with pytest.warns(
+            DeprecationWarning, match="unpublishRunningApps is deprecated"
+        ):
             client.app_publish(app_id=MOCK_APP_ID, unpublishRunningApps=True)
 
     @pytest.mark.usefixtures("clear_token_file_from_env")
diff --git a/tests/test_domino.py b/tests/test_domino.py
index 3e3bd069..c943167d 100644
--- a/tests/test_domino.py
+++ b/tests/test_domino.py
@@ -1,7 +1,8 @@
 import os
-import pytest
 import time
 
+import pytest
+
 from domino import Domino
 from domino.http_request_manager import _HttpRequestManager
 
@@ -14,7 +15,9 @@ def test_versioning(requests_mock, dummy_hostname):
     # Mock a typical response from the jobs status API endpoint (GET)
     requests_mock.get(f"{dummy_hostname}/version", json={"version": "5.10.0"})
 
-    dom = Domino(host=dummy_hostname, project="rand_user/rand_project", api_key="rand_api_key")
+    dom = Domino(
+        host=dummy_hostname, project="rand_user/rand_project", api_key="rand_api_key"
+    )
 
     dep_version = dom.deployment_version().get("version")
     assert dep_version == "5.10.0"
@@ -28,13 +31,13 @@ def test_request_session(test_auth_base):
     start_time = time.time()
     try:
         response = request_manager.request_session.get(
-            'https://localhost:9999'  # ConnectionError
+            "https://localhost:9999"  # ConnectionError
         )
     except Exception as ex:
-        print('It failed :(', ex.__class__.__name__)
+        print("It failed :(", ex.__class__.__name__)
     else:
-        print('It eventually worked', response.status_code)
+        print("It eventually worked", response.status_code)
     finally:
         end_time = time.time()
         total_time = end_time - start_time
-        assert (total_time > 5)  # actual value should be around 6.0210....
+        assert total_time > 5  # actual value should be around 6.0210....
diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py
index eea21d31..fc38d599 100644
--- a/tests/test_endpoints.py
+++ b/tests/test_endpoints.py
@@ -4,6 +4,7 @@
 Unit tests at top (no live Domino deployment required).
 Integration tests below (skipped unless a live deployment is reachable).
 """
+
 from pprint import pformat
 
 import pytest
@@ -45,6 +46,7 @@ def base_mocks(requests_mock, dummy_hostname):
 # API Endpoints (endpoint_state / endpoint_publish / endpoint_unpublish)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_endpoint_state_returns_dict(requests_mock, dummy_hostname):
     requests_mock.get(
@@ -99,7 +101,9 @@ def test_endpoint_publish_returns_response(requests_mock, dummy_hostname):
         status_code=200,
     )
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
-    response = d.endpoint_publish(file="predict.py", function="predict", commitId=MOCK_COMMIT_ID)
+    response = d.endpoint_publish(
+        file="predict.py", function="predict", commitId=MOCK_COMMIT_ID
+    )
     assert response.status_code == 200
 
 
@@ -107,6 +111,7 @@ def test_endpoint_publish_returns_response(requests_mock, dummy_hostname):
 # Model Endpoints (models_list / model_publish / model_version_*)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_models_list_returns_list(requests_mock, dummy_hostname):
     requests_mock.get(
@@ -244,6 +249,7 @@ def test_model_version_export_logs_returns_dict(requests_mock, dummy_hostname):
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
diff --git a/tests/test_environments.py b/tests/test_environments.py
index 81f6f0f9..009ff080 100644
--- a/tests/test_environments.py
+++ b/tests/test_environments.py
@@ -33,6 +33,7 @@ def base_mocks(requests_mock, dummy_hostname):
 # Unit tests (no live Domino deployment required)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_environments_list_returns_list(requests_mock, dummy_hostname):
     requests_mock.get(f"{dummy_hostname}/v1/environments", json=[MOCK_ENVIRONMENT])
@@ -121,6 +122,7 @@ def test_useable_environments_list_returns_list(requests_mock, dummy_hostname):
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
diff --git a/tests/test_finops.py b/tests/test_finops.py
index df72805c..50ea52ed 100644
--- a/tests/test_finops.py
+++ b/tests/test_finops.py
@@ -3,7 +3,9 @@
 Unit tests at top (no live Domino deployment required).
 Integration tests below (skipped unless a live deployment is reachable).
 """
+
 import uuid
+
 import pytest
 
 from domino import Domino
@@ -39,6 +41,7 @@ def base_mocks(requests_mock, dummy_hostname):
 # Unit tests (no live Domino deployment required)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_budget_defaults_list_returns_list(requests_mock, dummy_hostname):
     requests_mock.get(
@@ -94,7 +97,9 @@ def test_budget_override_create_sends_correct_payload(requests_mock, dummy_hostn
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_budget_override_update_sends_put_with_correct_url(requests_mock, dummy_hostname):
+def test_budget_override_update_sends_put_with_correct_url(
+    requests_mock, dummy_hostname
+):
     update_mock = requests_mock.put(
         f"{dummy_hostname}/v4/cost/budgets/overrides/{MOCK_BUDGET_ID}",
         json={"labelId": MOCK_BUDGET_ID, "limit": 0.9},
@@ -233,7 +238,9 @@ def test_billing_tag_settings_mode_returns_dict(requests_mock, dummy_hostname):
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_billing_tag_settings_mode_update_sends_correct_payload(requests_mock, dummy_hostname):
+def test_billing_tag_settings_mode_update_sends_correct_payload(
+    requests_mock, dummy_hostname
+):
     update_mock = requests_mock.put(
         f"{dummy_hostname}/v4/cost/billingtagSettings/mode",
         json={"mode": "Required"},
@@ -255,7 +262,9 @@ def test_project_billing_tag_returns_tag(requests_mock, dummy_hostname):
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_project_billing_tag_update_sends_correct_payload(requests_mock, dummy_hostname):
+def test_project_billing_tag_update_sends_correct_payload(
+    requests_mock, dummy_hostname
+):
     update_mock = requests_mock.post(
         f"{dummy_hostname}/v4/projects/{MOCK_PROJECT_ID}/billingtag",
         json={"tag": "env-staging"},
@@ -289,7 +298,9 @@ def test_projects_by_billing_tag_returns_dict(requests_mock, dummy_hostname):
 
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
-def test_project_billing_tag_bulk_update_sends_correct_payload(requests_mock, dummy_hostname):
+def test_project_billing_tag_bulk_update_sends_correct_payload(
+    requests_mock, dummy_hostname
+):
     bulk_mock = requests_mock.post(
         f"{dummy_hostname}/v4/projects/billingtags/projects",
         json={"updated": 1},
@@ -307,6 +318,7 @@ def test_project_billing_tag_bulk_update_sends_correct_payload(requests_mock, du
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 def get_short_id() -> str:
     return str(uuid.uuid4())[:8]
 
@@ -315,7 +327,10 @@ def get_short_id() -> str:
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
 @pytest.mark.parametrize("new_limit", [0.0006, 0.0037])
-@pytest.mark.parametrize("budget_label", [BudgetLabel.PROJECT, BudgetLabel.ORGANIZATION, BudgetLabel.BILLINGTAG])
+@pytest.mark.parametrize(
+    "budget_label",
+    [BudgetLabel.PROJECT, BudgetLabel.ORGANIZATION, BudgetLabel.BILLINGTAG],
+)
 def test_budgets_defaults(budget_label, new_limit, default_domino_client):
     """
     Test get and update default budgets
@@ -327,7 +342,7 @@ def test_budgets_defaults(budget_label, new_limit, default_domino_client):
     budget_defaults = default_domino_client.budget_defaults_list()
     for budget in budget_defaults:
         if budget["budgetLabel"] == budget_label.value:
-            assert (budget["limit"] == new_limit)
+            assert budget["limit"] == new_limit
 
 
 @pytest.mark.skipif(
@@ -338,15 +353,25 @@ def test_budgets_overrides(default_domino_client):
     Test creating, updating and deleting budget overrides
     """
 
-    budget_ids = ["6626b3c64fa2ef89b5def375", "6626b3cf9106c3938a2c5f01", "6626b3d73473d4a99c2c642b"]
+    budget_ids = [
+        "6626b3c64fa2ef89b5def375",
+        "6626b3cf9106c3938a2c5f01",
+        "6626b3d73473d4a99c2c642b",
+    ]
     budget_limit: float = 0.2048
 
     curr_overrides = default_domino_client.budget_overrides_list()
     overrides_count_start = len(curr_overrides)
 
-    default_domino_client.budget_override_create(BudgetLabel.PROJECT, budget_ids[0], budget_limit)
-    default_domino_client.budget_override_create(BudgetLabel.ORGANIZATION, budget_ids[1], budget_limit)
-    default_domino_client.budget_override_create(BudgetLabel.BILLINGTAG, budget_ids[2], budget_limit)
+    default_domino_client.budget_override_create(
+        BudgetLabel.PROJECT, budget_ids[0], budget_limit
+    )
+    default_domino_client.budget_override_create(
+        BudgetLabel.ORGANIZATION, budget_ids[1], budget_limit
+    )
+    default_domino_client.budget_override_create(
+        BudgetLabel.BILLINGTAG, budget_ids[2], budget_limit
+    )
 
     budget_overrides = default_domino_client.budget_overrides_list()
 
@@ -356,7 +381,9 @@ def test_budgets_overrides(default_domino_client):
             assert budget["limit"] == budget_limit
 
     new_limit = 0.1024
-    default_domino_client.budget_override_update(BudgetLabel.BILLINGTAG, budget_ids[2], new_limit)
+    default_domino_client.budget_override_update(
+        BudgetLabel.BILLINGTAG, budget_ids[2], new_limit
+    )
 
     updated_override = default_domino_client.budget_overrides_list()
     assert len(updated_override) >= 3
@@ -379,14 +406,18 @@ def test_budgets_alerts_settings(default_domino_client):
     Test creating a budget with current project, and other projects
     """
     alert_settings = default_domino_client.budget_alerts_settings()
-    assert 'alertsEnabled' in alert_settings.keys()
+    assert "alertsEnabled" in alert_settings
 
-    default_domino_client.budget_alerts_settings_update(alerts_enabled=False, notify_org_owner=False)
+    default_domino_client.budget_alerts_settings_update(
+        alerts_enabled=False, notify_org_owner=False
+    )
     update_setting_1 = default_domino_client.budget_alerts_settings()
     assert update_setting_1["alertsEnabled"] is False
     assert update_setting_1["notifyOrgOwner"] is False
 
-    default_domino_client.budget_alerts_settings_update(alerts_enabled=True, notify_org_owner=True)
+    default_domino_client.budget_alerts_settings_update(
+        alerts_enabled=True, notify_org_owner=True
+    )
     update_setting_2 = default_domino_client.budget_alerts_settings()
     assert update_setting_2["alertsEnabled"] is True
     assert update_setting_2["notifyOrgOwner"] is True
@@ -426,15 +457,19 @@ def test_billing_tag_settings(default_domino_client):
     Test creating a budget with current project, and other projects
     """
     billing_tag_setting = default_domino_client.billing_tag_settings()
-    assert 'mode' in billing_tag_setting.keys()
+    assert "mode" in billing_tag_setting.keys()
 
-    default_domino_client.billing_tag_settings_mode_update(BillingTagSettingMode.REQUIRED)
+    default_domino_client.billing_tag_settings_mode_update(
+        BillingTagSettingMode.REQUIRED
+    )
     updated_tag_setting = default_domino_client.billing_tag_settings()
     assert updated_tag_setting["mode"] == BillingTagSettingMode.REQUIRED.value
     mode = default_domino_client.billing_tag_settings_mode()
     assert mode["mode"] == BillingTagSettingMode.REQUIRED.value
 
-    default_domino_client.billing_tag_settings_mode_update(BillingTagSettingMode.OPTIONAL)
+    default_domino_client.billing_tag_settings_mode_update(
+        BillingTagSettingMode.OPTIONAL
+    )
     newer_tag_setting = default_domino_client.billing_tag_settings()
     assert newer_tag_setting["mode"] == BillingTagSettingMode.OPTIONAL.value
 
@@ -446,7 +481,11 @@ def test_billing_tags(default_domino_client):
     """
     Test creating a budget with current project, and other projects
     """
-    billing_tags = ["PYTHON-DOMINO-ACTIVE-tag-001", "PYTHON-DOMINO-ACTIVE-tag-002", "PYTHON-DOMINO-ACTIVE-tag-003"]
+    billing_tags = [
+        "PYTHON-DOMINO-ACTIVE-tag-001",
+        "PYTHON-DOMINO-ACTIVE-tag-002",
+        "PYTHON-DOMINO-ACTIVE-tag-003",
+    ]
     new_billing_tags = default_domino_client.billing_tags_create(billing_tags)
     assert len(new_billing_tags["billingTags"]) == 3
 
@@ -473,17 +512,23 @@ def test_projects_billing_tag(default_domino_client):
 
     setting_mode = default_domino_client.billing_tag_settings_mode()
     if setting_mode["mode"] != BillingTagSettingMode.OPTIONAL.value:
-        default_domino_client.billing_tag_settings_mode_update(BillingTagSettingMode.OPTIONAL)
+        default_domino_client.billing_tag_settings_mode_update(
+            BillingTagSettingMode.OPTIONAL
+        )
 
     test_billing_tag = f"TestBillingTag-{get_short_id()}"
     test_billing_tag2 = f"TestBillingTag-{get_short_id()}"
     test_billing_tag3 = f"TestBillingTag-{get_short_id()}"
-    default_domino_client.billing_tags_create([test_billing_tag, test_billing_tag2, test_billing_tag3])
+    default_domino_client.billing_tags_create(
+        [test_billing_tag, test_billing_tag2, test_billing_tag3]
+    )
 
     test_project_name = f"project-{get_short_id()}"
     test_project_name_2 = f"project-{get_short_id()}"
 
-    bt_project = default_domino_client.project_create_v4(project_name=test_project_name, billing_tag=test_billing_tag)
+    bt_project = default_domino_client.project_create_v4(
+        project_name=test_project_name, billing_tag=test_billing_tag
+    )
     project = default_domino_client.project_create_v4(project_name=test_project_name_2)
 
     project_bt = default_domino_client.project_billing_tag(bt_project["id"])
@@ -500,15 +545,22 @@ def test_projects_billing_tag(default_domino_client):
     project_bt_reset = default_domino_client.project_billing_tag(project["id"])
     assert project_bt_reset is None
 
-    query_p = default_domino_client.projects_by_billing_tag(billing_tag=test_billing_tag)
+    query_p = default_domino_client.projects_by_billing_tag(
+        billing_tag=test_billing_tag
+    )
     assert query_p["totalMatches"] == 1
     assert query_p["page"][0]["id"] == bt_project["id"]
     assert query_p["page"][0]["billingTag"]["tag"] == test_billing_tag
 
-    projects_tags = {bt_project["id"]: test_billing_tag3, project["id"]: test_billing_tag3}
+    projects_tags = {
+        bt_project["id"]: test_billing_tag3,
+        project["id"]: test_billing_tag3,
+    }
     default_domino_client.project_billing_tag_bulk_update(projects_tags)
 
-    query_p = default_domino_client.projects_by_billing_tag(billing_tag=test_billing_tag3)
+    query_p = default_domino_client.projects_by_billing_tag(
+        billing_tag=test_billing_tag3
+    )
     assert query_p["totalMatches"] == 2
     assert query_p["page"][0]["id"] in {bt_project["id"], project["id"]}
     assert query_p["page"][0]["billingTag"]["tag"] == test_billing_tag3
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 840009e6..d740afbb 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -2,6 +2,7 @@
 Unit tests for domino.helpers module.
 No HTTP calls — pure logic tests.
 """
+
 import pytest
 
 from domino.helpers import (
@@ -14,9 +15,9 @@
     is_version_compatible,
 )
 
-
 # --- is_version_compatible ---
 
+
 def test_is_version_compatible_returns_true_for_supported_version():
     assert is_version_compatible("5.0.0") is True
 
@@ -27,13 +28,18 @@ def test_is_version_compatible_returns_false_for_old_version():
 
 def test_is_version_compatible_returns_true_for_exact_minimum():
     from domino.constants import MINIMUM_SUPPORTED_DOMINO_VERSION
+
     assert is_version_compatible(MINIMUM_SUPPORTED_DOMINO_VERSION) is True
 
 
 # --- clean_host_url ---
 
+
 def test_clean_host_url_strips_path():
-    assert clean_host_url("https://domino.example.com/some/path") == "https://domino.example.com"
+    assert (
+        clean_host_url("https://domino.example.com/some/path")
+        == "https://domino.example.com"
+    )
 
 
 def test_clean_host_url_preserves_scheme_and_host():
@@ -50,6 +56,7 @@ def test_clean_host_url_handles_trailing_slash():
 
 # --- is_compute_cluster_autoscaling_supported ---
 
+
 def test_autoscaling_supported_for_high_version():
     assert is_compute_cluster_autoscaling_supported("9.9.9") is True
 
@@ -60,6 +67,7 @@ def test_autoscaling_not_supported_for_low_version():
 
 # --- is_compute_cluster_properties_supported ---
 
+
 def test_cluster_properties_supported_for_high_version():
     assert is_compute_cluster_properties_supported("9.9.9") is True
 
@@ -70,6 +78,7 @@ def test_cluster_properties_not_supported_for_low_version():
 
 # --- is_on_demand_spark_cluster_supported ---
 
+
 def test_on_demand_spark_supported_for_high_version():
     assert is_on_demand_spark_cluster_supported("9.9.9") is True
 
@@ -80,6 +89,7 @@ def test_on_demand_spark_not_supported_for_low_version():
 
 # --- is_external_volume_mounts_supported ---
 
+
 def test_external_volume_mounts_supported_for_high_version():
     assert is_external_volume_mounts_supported("9.9.9") is True
 
@@ -90,6 +100,7 @@ def test_external_volume_mounts_not_supported_for_low_version():
 
 # --- is_cluster_type_supported ---
 
+
 @pytest.mark.parametrize("cluster_type", ["Ray", "Dask", "MPI", "Spark"])
 def test_known_cluster_types_supported_for_high_version(cluster_type):
     result = is_cluster_type_supported("9.9.9", cluster_type)
diff --git a/tests/test_jobs.py b/tests/test_jobs.py
index 8d3c8314..bbdd4d3c 100644
--- a/tests/test_jobs.py
+++ b/tests/test_jobs.py
@@ -3,6 +3,7 @@
 Unit tests at top (no live Domino deployment required).
 Integration tests below (skipped unless a live deployment is reachable).
 """
+
 import time
 from pprint import pformat
 
@@ -10,8 +11,7 @@
 import pytest
 from requests.exceptions import RequestException
 
-from domino import Domino
-from domino import exceptions
+from domino import Domino, exceptions
 from domino.helpers import domino_is_reachable
 
 # Realistic mock IDs used in unit tests.
@@ -97,7 +97,8 @@ def mock_job_start_blocking_setup(requests_mock, dummy_hostname):
     project_endpoint = "v4/gateway/projects/findProjectByOwnerAndName"
     project_query = "ownerName=anyuser&projectName=anyproject"
     requests_mock.get(
-        f"{dummy_hostname}/{project_endpoint}?{project_query}", json={"id": MOCK_PROJECT_ID}
+        f"{dummy_hostname}/{project_endpoint}?{project_query}",
+        json={"id": MOCK_PROJECT_ID},
     )
 
     requests_mock.post(
@@ -147,6 +148,7 @@ def mock_job_start_blocking_setup(requests_mock, dummy_hostname):
 # Unit tests — v1 Runs API
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_runs_list_returns_dict(requests_mock, dummy_hostname):
     requests_mock.get(
@@ -249,6 +251,7 @@ def test_get_run_log_includes_setup_by_default(requests_mock, dummy_hostname):
 # Unit tests — v4 Jobs API
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_job_start_reraises_relogin_exception(dummy_hostname):
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
@@ -281,7 +284,9 @@ def test_job_stop_defaults_commit_results_true(requests_mock, dummy_hostname):
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_job_restart_sends_correct_payload(requests_mock, dummy_hostname):
     restart_mock = requests_mock.post(
-        f"{dummy_hostname}/v4/jobs/restart", json=MOCK_JOB_RESPONSE_SIMPLE, status_code=200
+        f"{dummy_hostname}/v4/jobs/restart",
+        json=MOCK_JOB_RESPONSE_SIMPLE,
+        status_code=200,
     )
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     d.job_restart(MOCK_JOB_ID)
@@ -328,6 +333,7 @@ def test_hardware_tiers_list_returns_list(requests_mock, dummy_hostname):
 # Unit tests — job_start_blocking
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
 def test_job_status_completes_with_default_params(requests_mock, dummy_hostname):
     """
@@ -365,8 +371,7 @@ def test_job_start_sends_main_repo_git_ref(requests_mock, dummy_hostname):
     )
 
     jobs_start_request = next(
-        req for req in requests_mock.request_history
-        if req.path == "/v4/jobs/start"
+        req for req in requests_mock.request_history if req.path == "/v4/jobs/start"
     )
     assert jobs_start_request.json()["mainRepoGitRef"] == git_ref
 
@@ -405,6 +410,7 @@ def test_job_status_without_ignoring_exceptions(requests_mock, dummy_hostname):
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
@@ -425,7 +431,9 @@ def test_job_start_override_hardware_tier_id(default_domino_client):
     """
     hardware_tiers = default_domino_client.hardware_tiers_list()
     non_default_hardware_tiers = [
-        hwt for hwt in hardware_tiers if not hwt["hardwareTier"]["hwtFlags"]["isDefault"]
+        hwt
+        for hwt in hardware_tiers
+        if not hwt["hardwareTier"]["hwtFlags"]["isDefault"]
     ]
     if len(non_default_hardware_tiers) == 0:
         pytest.xfail("No non-default hardware tiers found: cannot run test")
@@ -450,7 +458,9 @@ def test_job_start_override_hardware_tier_name(default_domino_client):
     hardware_tiers = default_domino_client.hardware_tiers_list()
 
     non_default_hardware_tiers = [
-        hwt for hwt in hardware_tiers if not hwt["hardwareTier"]["hwtFlags"]["isDefault"]
+        hwt
+        for hwt in hardware_tiers
+        if not hwt["hardwareTier"]["hwtFlags"]["isDefault"]
     ]
     if len(non_default_hardware_tiers) == 0:
         pytest.xfail("No non-default hardware tiers found: cannot run test")
diff --git a/tests/test_operator.py b/tests/test_operator.py
index 6b52358c..480333b5 100644
--- a/tests/test_operator.py
+++ b/tests/test_operator.py
@@ -2,16 +2,14 @@
 from datetime import datetime
 
 import pytest
-from airflow.operators.dummy import DummyOperator
-from airflow import settings
+from airflow import DAG, settings
+from airflow.models import TaskInstance
 from airflow.models.base import Base
+from airflow.operators.dummy import DummyOperator
 
 from domino.airflow import DominoOperator
 from domino.exceptions import RunFailedException
 from domino.helpers import domino_is_reachable
-from airflow import DAG
-from airflow.models import TaskInstance
-
 
 TEST_PROJECT = os.environ.get("DOMINO_TEST_PROJECT")
 dag_id = "test_operator"
@@ -35,7 +33,7 @@ def test_airflow_dags():
     dag = DAG(dag_id, start_date=start_time)
     task = DummyOperator(
         dag=dag,
-        task_id='test_airflow_dags',
+        task_id="test_airflow_dags",
     )
 
     task.run()
diff --git a/tests/test_project_id.py b/tests/test_project_id.py
index f2289fa4..a7b1b805 100644
--- a/tests/test_project_id.py
+++ b/tests/test_project_id.py
@@ -2,10 +2,10 @@
 Unit tests for the project_id property.
 All tests use requests_mock — no live Domino deployment required.
 """
+
 import pytest
 
-from domino import Domino
-from domino import exceptions
+from domino import Domino, exceptions
 
 MOCK_PROJECT_ID = "aabbccddeeff001122334455"
 
@@ -57,7 +57,6 @@ def test_project_id_is_cached(requests_mock, dummy_hostname):
     _ = d.project_id
 
     find_calls = [
-        r for r in requests_mock.request_history
-        if "findProjectByOwnerAndName" in r.url
+        r for r in requests_mock.request_history if "findProjectByOwnerAndName" in r.url
     ]
     assert len(find_calls) == 1
diff --git a/tests/test_projects.py b/tests/test_projects.py
index be27f204..16fd870e 100644
--- a/tests/test_projects.py
+++ b/tests/test_projects.py
@@ -3,6 +3,7 @@
 Unit tests at top (no live Domino deployment required).
 Integration tests below (skipped unless a live deployment is reachable).
 """
+
 import uuid
 import warnings
 from pprint import pformat
@@ -40,6 +41,7 @@ def base_mocks(requests_mock, dummy_hostname):
 # Unit tests (no live Domino deployment required)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_deployment_version_returns_dict(requests_mock, dummy_hostname):
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
@@ -80,6 +82,7 @@ def test_files_upload_sends_put(requests_mock, dummy_hostname):
     )
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     import io
+
     response = d.files_upload("test.py", io.BytesIO(b"print('hello')"))
     assert upload_mock.called
     assert response.status_code == 201
@@ -94,6 +97,7 @@ def test_files_upload_prepends_slash_if_missing(requests_mock, dummy_hostname):
     )
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     import io
+
     d.files_upload("test.py", io.BytesIO(b""))
     assert upload_mock.called
 
@@ -254,6 +258,7 @@ def test_project_archive_raises_for_nonexistent_project(requests_mock, dummy_hos
 # Integration tests (require a live Domino deployment)
 # ---------------------------------------------------------------------------
 
+
 @pytest.mark.skipif(
     not domino_is_reachable(), reason="No access to a live Domino deployment"
 )
@@ -381,7 +386,9 @@ def test_get_file_from_a_project_v2(default_domino_client):
 
     for file in files_list["data"]:
         if file["path"] == ".dominoignore":
-            file_contents = default_domino_client.blobs_get_v2(file["path"], commits_list[0], default_domino_client.project_id).read()
+            file_contents = default_domino_client.blobs_get_v2(
+                file["path"], commits_list[0], default_domino_client.project_id
+            ).read()
             break
 
     assert "ignore certain files" in str(
@@ -400,7 +407,9 @@ def test_get_blobs_v2_non_canonical(default_domino_client):
     commits_list = default_domino_client.commits_list()
 
     with pytest.raises(exceptions.MalformedInputException):
-        default_domino_client.blobs_get_v2(non_canonical_path, commits_list[0], default_domino_client.project_id).read()
+        default_domino_client.blobs_get_v2(
+            non_canonical_path, commits_list[0], default_domino_client.project_id
+        ).read()
 
 
 @pytest.mark.skipif(
diff --git a/tests/test_spark_operator.py b/tests/test_spark_operator.py
index 00f14557..52edc618 100644
--- a/tests/test_spark_operator.py
+++ b/tests/test_spark_operator.py
@@ -4,11 +4,13 @@
 
 `airflow db init`
 """
+
 import os
 from datetime import datetime
+
+import pytest
 from airflow import DAG
 from airflow.models import TaskInstance
-import pytest
 
 from domino.airflow import DominoSparkOperator
 from domino.exceptions import RunFailedException

From 9fb3adcc3d61de52e7c5f9eaf91365b1931a3b72 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 19:26:10 +0100
Subject: [PATCH 10/20] resolve all 38 pre-existing mypy type errors

---
 .pre-commit-config.yaml              |  1 +
 domino/_custom_metrics.py            | 18 +++++++++---------
 domino/agents/logging/dominorun.py   |  2 +-
 domino/agents/read_agent_config.py   |  4 ++--
 domino/agents/tracing/_util.py       |  4 +++-
 domino/agents/tracing/inittracing.py |  3 +--
 domino/agents/tracing/tracing.py     | 11 +++++++----
 domino/airflow/_operator.py          |  2 +-
 domino/datasets.py                   | 22 +++++++++++++---------
 domino/domino.py                     | 18 ++++++++++--------
 10 files changed, 48 insertions(+), 37 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fdd87e63..c5f7f1df 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,6 +6,7 @@ repos:
         entry: python scripts/check_snake_case.py
         language: python
         files: ^domino/.*\.py$
+        exclude: ^domino/_impl/|^domino/airflow/
         pass_filenames: true
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
diff --git a/domino/_custom_metrics.py b/domino/_custom_metrics.py
index 69197d7f..d809825c 100644
--- a/domino/_custom_metrics.py
+++ b/domino/_custom_metrics.py
@@ -36,10 +36,10 @@ def trigger_alert(
         model_monitoring_id: str,
         metric: str,
         value: Any,
-        condition: str = None,
+        condition: Optional[str] = None,
         lower_limit: Any = None,
         upper_limit: Any = None,
-        description: str = None,
+        description: Optional[str] = None,
     ) -> None:
         pass
 
@@ -50,7 +50,7 @@ def log_metric(
         metric: str,
         value: Any,
         timestamp: str,
-        tags: Dict = None,
+        tags: Optional[Dict] = None,
     ) -> None:
         pass
 
@@ -83,10 +83,10 @@ def trigger_alert(
         model_monitoring_id: str,
         metric: str,
         value: Any,
-        condition: str = None,
+        condition: Optional[str] = None,
         lower_limit: Any = None,
         upper_limit: Any = None,
-        description: str = None,
+        description: Optional[str] = None,
     ) -> None:
         url = self._routes.metric_alerts()
         target_range: Optional[TargetRangeV1] = (
@@ -117,7 +117,7 @@ def log_metric(
         metric: str,
         value: Any,
         timestamp: str,
-        tags: Dict = None,
+        tags: Optional[Dict] = None,
     ) -> None:
         item = {
             "modelMonitoringId": model_monitoring_id,
@@ -198,10 +198,10 @@ def trigger_alert(
         model_monitoring_id: str,
         metric: str,
         value: Any,
-        condition: str = None,
+        condition: Optional[str] = None,
         lower_limit: Any = None,
         upper_limit: Any = None,
-        description: str = None,
+        description: Optional[str] = None,
     ) -> None:
         url = self._routes.metric_alerts()
         request = {
@@ -225,7 +225,7 @@ def log_metric(
         metric: str,
         value: Any,
         timestamp: str,
-        tags: Dict = None,
+        tags: Optional[Dict] = None,
     ) -> None:
         item = {
             "modelMonitoringId": model_monitoring_id,
diff --git a/domino/agents/logging/dominorun.py b/domino/agents/logging/dominorun.py
index 1482841a..6790fcbe 100644
--- a/domino/agents/logging/dominorun.py
+++ b/domino/agents/logging/dominorun.py
@@ -124,7 +124,7 @@ def __init__(
         experiment_name: Optional[str] = None,
         run_id: Optional[str] = None,
         agent_config_path: Optional[str] = None,
-        custom_summary_metrics: Optional[list[(str, SummaryStatistic)]] = None,
+        custom_summary_metrics: Optional[list[tuple[str, SummaryStatistic]]] = None,
     ):
         """DominoRun is a context manager that starts an Mlflow run and attaches the user's Agent configuration to it,
         create a Logged Model with the Agent configuration, and computes summary metrics for evaluation traces made during the run.
diff --git a/domino/agents/read_agent_config.py b/domino/agents/read_agent_config.py
index 1e11ca17..b6842e04 100644
--- a/domino/agents/read_agent_config.py
+++ b/domino/agents/read_agent_config.py
@@ -1,6 +1,6 @@
 import logging
 import os
-from typing import Optional
+from typing import Any, Optional
 
 import yaml
 
@@ -21,7 +21,7 @@ def flatten_dict(d, parent_key="", sep="."):
     return dict(items)
 
 
-def get_flattened_agent_config(path: Optional[str] = None) -> dict[str, any]:
+def get_flattened_agent_config(path: Optional[str] = None) -> dict[str, Any]:
     config = read_agent_config(path)
     return flatten_dict(config)
 
diff --git a/domino/agents/tracing/_util.py b/domino/agents/tracing/_util.py
index ce764b49..ac766bc8 100644
--- a/domino/agents/tracing/_util.py
+++ b/domino/agents/tracing/_util.py
@@ -19,5 +19,7 @@ def build_agent_experiment_name(id: str) -> str:
 
 def get_running_agent_experiment_name() -> str | None:
     if is_agent():
-        return build_agent_experiment_name(_get_agent_id())
+        agent_id = _get_agent_id()
+        assert agent_id is not None
+        return build_agent_experiment_name(agent_id)
     return None
diff --git a/domino/agents/tracing/inittracing.py b/domino/agents/tracing/inittracing.py
index b229afda..22b8ee8f 100644
--- a/domino/agents/tracing/inittracing.py
+++ b/domino/agents/tracing/inittracing.py
@@ -13,8 +13,7 @@
 # autolog frameworks, then the worst case scenario is that we get duplicate autolog calls. These are local to the process
 # so not a big deal
 
-global triggered_autolog_frameworks
-triggered_autolog_frameworks = set()
+triggered_autolog_frameworks: set[str] = set()
 
 global _is_prod_tracing_initialized
 _is_prod_tracing_initialized = False
diff --git a/domino/agents/tracing/tracing.py b/domino/agents/tracing/tracing.py
index 771b9ff6..1b566bac 100644
--- a/domino/agents/tracing/tracing.py
+++ b/domino/agents/tracing/tracing.py
@@ -83,7 +83,7 @@ class SearchTracesResponse:
     """The token for the next page of results"""
 
 
-def _datetime_to_ms(dt: datetime) -> int:
+def _datetime_to_ms(dt: datetime) -> float:
     return dt.timestamp() * 1000
 
 
@@ -353,13 +353,15 @@ def gen_wrapper(*args, **kwargs):
 
 
 def _build_evaluation_result(tag_key: str, tag_value: str) -> EvaluationResult:
-    value = tag_value
+    value: float | str = tag_value
     try:
         value = float(tag_value)
     except Exception:
         pass
 
-    return EvaluationResult(name=get_eval_tag_name(tag_key), value=value)
+    name = get_eval_tag_name(tag_key)
+    assert name is not None
+    return EvaluationResult(name=name, value=value)
 
 
 def _build_trace_summaries(traces) -> list[TraceSummary]:
@@ -491,6 +493,7 @@ def _search_traces(
         run_filter_clause = f'metadata.mlflow.sourceRun = "{run_id}"'
         filter_clauses.append(run_filter_clause)
     else:
+        assert agent_id is not None
         experiment_name = build_agent_experiment_name(agent_id)
         experiment = client.get_experiment_by_name(experiment_name)
         if not experiment:
@@ -546,7 +549,7 @@ def _search_traces(
     return SearchTracesResponse(trace_summaries, next_page_token)
 
 
-def _return_traced_result(result: any):
+def _return_traced_result(result: Any):
     if result != DOMINO_NO_RESULT_ADD_TRACING:
         return result
     else:
diff --git a/domino/airflow/_operator.py b/domino/airflow/_operator.py
index 7643f535..68546ef2 100644
--- a/domino/airflow/_operator.py
+++ b/domino/airflow/_operator.py
@@ -37,7 +37,7 @@ def __init__(
         host: Optional[str] = None,
         api_key: Optional[str] = None,
         domino_token_file: Optional[str] = None,
-        isDirect: bool = None,
+        isDirect: Optional[bool] = None,
         commitId: Optional[str] = None,
         title: Optional[str] = None,
         tier: Optional[str] = None,
diff --git a/domino/datasets.py b/domino/datasets.py
index 0ed6a115..dd1543e2 100644
--- a/domino/datasets.py
+++ b/domino/datasets.py
@@ -5,7 +5,7 @@
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
 from logging import Logger
-from typing import AnyStr
+from typing import Optional
 
 from retry import retry
 
@@ -39,16 +39,16 @@ class UploadChunk:
 class Uploader:
     def __init__(
         self,
-        csrf_no_check_header: {str, str},
+        csrf_no_check_header: dict[str, str],
         dataset_id: str,
         local_path_to_file_or_directory: str,
         log: Logger,
         request_manager: _HttpRequestManager,
         routes: _Routes,
-        target_relative_path: str,
-        file_upload_setting: str,
-        max_workers: int,
-        target_chunk_size: int,
+        target_relative_path: Optional[str],
+        file_upload_setting: Optional[str],
+        max_workers: Optional[int],
+        target_chunk_size: Optional[int],
         interrupted: bool = False,
     ):
         cleaned_relative_local_path = os.path.relpath(
@@ -70,7 +70,9 @@ def __init__(
         self.max_workers = max_workers or MAX_WORKERS
         self.target_relative_path = target_relative_path
         self.interrupted = interrupted
-        self.upload_key = None  # this will be set once the session is started
+        self.upload_key: Optional[str] = (
+            None  # this will be set once the session is started
+        )
 
     def __enter__(self):
         # creating upload session
@@ -157,6 +159,8 @@ def _create_chunk_queue(self) -> list[UploadChunk]:
         return chunk_q
 
     def _create_chunks(self, local_path_file, starting_index=1) -> list[UploadChunk]:
+        upload_key = self.upload_key
+        assert upload_key is not None
         file_size = os.path.getsize(local_path_file)
         file_name = os.path.basename(local_path_file)
         total_chunks = max(int(math.ceil(float(file_size) / self.target_chunk_size)), 1)
@@ -171,7 +175,7 @@ def _create_chunks(self, local_path_file, starting_index=1) -> list[UploadChunk]
                 relative_path=local_path_file,
                 target_chunk_size=self.target_chunk_size,
                 total_chunks=total_chunks,
-                upload_key=self.upload_key,
+                upload_key=upload_key,
             )
             for chunk_num in range(starting_index, total_chunks + 1)
         ]
@@ -240,7 +244,7 @@ def _upload_chunk_retry(self, checksum: str, chunk: UploadChunk, chunk_data):
             f"in {duration_ns / 1_000_000:.1f}ms ({bandwidth_bytes_per_second:.1f} B/s)"
         )
 
-    def _test_chunk(self, chunk: UploadChunk, chunk_data: AnyStr) -> (bool, int):
+    def _test_chunk(self, chunk: UploadChunk, chunk_data: bytes) -> tuple[bool, str]:
         # computing the MD5 checksum
         digest = hashlib.md5()
         digest.update(chunk_data)
diff --git a/domino/domino.py b/domino/domino.py
index 1a134458..09d40b62 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -588,7 +588,7 @@ def validate_distributed_compute_cluster_properties():
                     + f" This version of Domino supports the following cluster types: {supported_types_str}"
                 )
 
-            def throw_if_information_invalid(key: str, info: dict) -> bool:
+            def throw_if_information_invalid(key: str, info: dict) -> None:
                 try:
                     self._validate_information_data_type(info)
                 except Exception as e:
@@ -934,7 +934,7 @@ def project_create_v4(
         )
         data = {
             "name": project_name,
-            "visibility": visibility.value,
+            "visibility": visibility.value if visibility else None,
             "ownerId": owner,
             "description": description,
             "collaborators": collaborators if collaborators is not None else [],
@@ -1175,7 +1175,9 @@ def __app_get_status(self, app_id) -> Optional[str]:
         response = self.request_manager.get(url).json()
         return response.get("status", None)
 
-    def __app_create(self, name: str = "", hardware_tier_id: str = None) -> str:
+    def __app_create(
+        self, name: str = "", hardware_tier_id: Optional[str] = None
+    ) -> str:
         """
         Private method to create app
 
@@ -1541,10 +1543,10 @@ def datasets_upload_files(
         self,
         dataset_id: str,
         local_path_to_file_or_directory: str,
-        file_upload_setting: str = None,
-        max_workers: int = None,
-        target_chunk_size: int = None,
-        target_relative_path: str = None,
+        file_upload_setting: Optional[str] = None,
+        max_workers: Optional[int] = None,
+        target_chunk_size: Optional[int] = None,
+        target_relative_path: Optional[str] = None,
     ) -> str:
         """Upload file to dataset with multithreaded support.
 
@@ -1669,7 +1671,7 @@ def hardware_tiers_list(self):
         url = self._routes.hardware_tiers_list(self.project_id)
         return self._get(url)
 
-    def get_hardware_tier_id_from_name(self, hardware_tier_name: str):
+    def get_hardware_tier_id_from_name(self, hardware_tier_name: Optional[str]):
         for hardware_tier in self.hardware_tiers_list():
             if hardware_tier_name == hardware_tier["hardwareTier"]["name"]:
                 return hardware_tier["hardwareTier"]["id"]

From e67b1e796e63e98a2a593cf5f9b4b13c215c7307 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 19:35:33 +0100
Subject: [PATCH 11/20] add GitHub Actions workflow and update test
 infrastructure

  - Add .github/workflows/ci.yml with lint, typecheck, and test jobs
    gating every PR and push to master
  - Rewrite tox.ini to run full suite across Python 3.10/3.11/3.12
  - Add coverage config to pytest.ini
  - Update CHANGELOG
---
 .github/workflows/ci.yml | 112 +++++++++++++++++++++++++++++++++++++++
 CHANGELOG.md             |  13 ++++-
 pytest.ini               |  12 ++++-
 tox.ini                  |  43 ++++++++-------
 4 files changed, 155 insertions(+), 25 deletions(-)
 create mode 100644 .github/workflows/ci.yml

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..cf0d64e0
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,112 @@
+name: CI
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Install lint tools
+        run: pip install black==25.1.0 isort==5.13.2 "flake8==7.2.0"
+
+      - name: black
+        run: black --check .
+
+      - name: isort
+        run: isort --check .
+
+      - name: flake8
+        run: flake8 .
+
+      - name: snake_case
+        run: |
+          find domino -name "*.py" \
+            | grep -v "domino/_impl/" \
+            | grep -v "domino/airflow/" \
+            | xargs python scripts/check_snake_case.py
+
+  typecheck:
+    name: Type check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      - name: Install package and type stubs
+        run: |
+          pip install -e .
+          pip install "mypy==1.15.0" \
+            types-pyyaml \
+            types-requests \
+            types-retry \
+            types-pytz \
+            types-tabulate \
+            types-python-dateutil \
+            types-redis \
+            types-protobuf \
+            types-frozendict \
+            types-typing-extensions \
+            types-urllib3
+
+      - name: mypy
+        run: |
+          mypy domino/ \
+            --no-warn-no-return \
+            --namespace-packages \
+            --explicit-package-bases \
+            --ignore-missing-imports \
+            --follow-imports=silent \
+            --python-version=3.10
+
+  test:
+    name: Test (Python ${{ matrix.python-version }})
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11", "3.12"]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -e .
+          pip install pytest pytest-cov requests-mock docker pytest-mock
+
+      - name: Run tests
+        run: |
+          pytest tests/ \
+            --ignore=tests/agents \
+            --ignore=tests/integration \
+            --ignore=tests/scripts \
+            --ignore=tests/test_operator.py \
+            --ignore=tests/test_spark_operator.py \
+            -v --tb=short \
+            --cov=domino \
+            --cov-report=xml \
+            --cov-report=term-missing
+
+      - name: Upload coverage report
+        uses: actions/upload-artifact@v4
+        with:
+          name: coverage-${{ matrix.python-version }}
+          path: coverage.xml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd18ecdd..88fa5f4e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,8 +5,12 @@ All notable changes to the `python-domino` library will be documented in this fi
 ## [Unreleased]
 
 ### Added
-* Updated app_publish() to allow selecting branch/commitRef
-* Updated app_publish() to allow selecing specific app
+* `app_publish()` now accepts `branch` and `commit_id` parameters to launch an app from a specific git ref.
+* `app_publish()` now accepts an explicit `app_id` parameter to target a specific app.
+* `scripts/check_snake_case.py` — AST-based lint script that catches camelCase parameter names in new code.
+* GitHub Actions CI workflow (`.github/workflows/ci.yml`) that runs lint, type-checking, and tests on every PR and push to `master`. All checks must pass before a PR can be merged.
+* 18 new unit tests covering deprecation warnings for all renamed parameters (`tests/test_deprecations.py`).
+* `pyproject.toml` with `isort` and `black` configuration (`profile = "black"`, `target-version = ["py310"]`).
 
 ### Deprecated
 The following public API parameters have been renamed to follow PEP 8 (`snake_case`).
@@ -35,6 +39,11 @@ removed in the next major version.
 | `app_unpublish` | `appId` | `app_id` |
 
 ### Changed
+* Resolved all 38 pre-existing `mypy` type errors across `domino/`, bringing the codebase to a clean `mypy` pass with `--python-version=3.10`.
+* Resolved all `flake8`, `isort`, and `black` formatting errors across the codebase.
+* Updated `.pre-commit-config.yaml` to latest tool versions: `pre-commit-hooks` v5.0.0, `flake8` 7.2.0, `isort` 5.13.2, `black` 25.1.0, `mypy` v1.15.0. Added the `check-snake-case` hook.
+* Updated `tox.ini` to run the full test suite across Python 3.10, 3.11, and 3.12 (previously only ran two files on Python 3.9).
+* Updated `pytest.ini` with coverage configuration.
 
 ## [2.1.0]
 
diff --git a/pytest.ini b/pytest.ini
index 99475bb6..21171eb5 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,3 +1,13 @@
 [pytest]
+norecursedirs = tests/scripts
 
-norecursedirs = tests/scripts/*
+[coverage:run]
+source = domino
+omit =
+    domino/_impl/*
+
+[coverage:report]
+exclude_lines =
+    pragma: no cover
+    if TYPE_CHECKING:
+    @abstractmethod
diff --git a/tox.ini b/tox.ini
index 7b898a1d..752f8a29 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,33 +1,32 @@
-# tox (https://tox.readthedocs.io/) is a tool for running tests
-# in multiple virtualenvs. This configuration file will run the
-# test suite on all supported python versions. To use it, "pip install tox"
-# and then run "tox" from this directory.
-
 [tox]
-envlist = py39,nosdk
+envlist = py310,py311,py312,nosdk
 
-# NOTE: this should be extended to run all tests but there are a LOT of dependencies
-# to be figured out and tests to be fixed. I added some dependencies and the basic
-# pytest command to be run once tests are cleaned up.
 [testenv]
+usedevelop = true
 deps =
-    pytest
-    requests-mock
-    dominodatalab-data
-#    pyspark
-#    apache-airflow
-#    pandas
-setenv =
-    DATA_SDK = yes
+    pytest>=7.4.3
+    pytest-cov>=5.0.0
+    requests-mock>=1.9.3
+    docker>=7.1.0
+    pytest-mock>=3.14.1
 commands =
-#    pytest tests/
-    pytest tests/test_data_sources.py
+    pytest tests/ \
+        --ignore=tests/agents \
+        --ignore=tests/integration \
+        --ignore=tests/scripts \
+        --ignore=tests/test_operator.py \
+        --ignore=tests/test_spark_operator.py \
+        -v --tb=short \
+        --cov=domino \
+        --cov-report=term-missing \
+        {posargs}
 
 [testenv:nosdk]
+usedevelop = true
 deps =
-    pytest
-    requests-mock
+    pytest>=7.4.3
+    requests-mock>=1.9.3
 setenv =
     DATA_SDK = no
 commands =
-    pytest tests/test_data_sources_nods.py
+    pytest tests/test_data_sources_nods.py -v {posargs}

From cd31d944fd0edf983976c1aae87ff701471ab753 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 19:44:58 +0100
Subject: [PATCH 12/20] Updated to remove unnecessary named param

---
 domino/domino.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/domino/domino.py b/domino/domino.py
index 09d40b62..17d93c07 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -1131,7 +1131,7 @@ def app_publish(
             )
         app_id = app_id or self._app_id
         if unpublish_running_apps:
-            self.app_unpublish(app_id=app_id)
+            self.app_unpublish(app_id)
         if app_id is None:
             # No App Exists creating one
             app_id = self.__app_create(hardware_tier_id=hardware_tier_id)

From 8e8d8cad196d09fc244030d21c1e0c67daf503a2 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 19:46:19 +0100
Subject: [PATCH 13/20] - Updated method signature from old camelCase names to
 snake_case in both the heading and parameter list - Documented all 7
 parameters (including the 5 that were already there but undocumented:
 environment_id, external_volume_mount_ids, commit_id, branch, app_id) - Added
 code examples showing the three common new use cases (branch, commit,
 specific app ID) - Added a deprecation note pointing callers from the old
 camelCase names to the new ones - Updated app_unpublish() to document its
 app_id parameter (which was also previously undocumented) - Updated
 app_publish() to include branch and commit_id when publishing

---
 README.adoc | 29 +++++++++++++++++++++++++----
 README.md   | 47 +++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 66 insertions(+), 10 deletions(-)

diff --git a/README.adoc b/README.adoc
index 3a8d6e0b..e4a54b3c 100644
--- a/README.adoc
+++ b/README.adoc
@@ -525,17 +525,38 @@ Retrieve a file from the Domino server in a project from its path and commit id.
 
 === Apps
 
-==== app_publish(unpublishRunningApps=True, hardwareTierId=None)
+==== app_publish(unpublish_running_apps=True, hardware_tier_id=None, environment_id=None, external_volume_mount_ids=None, commit_id=None, branch=None, app_id=None)
 
 Publish an app within a project, or republish an existing app.
 
-* _unpublishRunningApps:_ (Defaults to True) Check for an active app instance in the current project and unpublish it before re/publishing.
-* _hardwareTierId:_ (Optional) Launch the app on the specified hardware tier.
+* _unpublish_running_apps:_ (Defaults to `True`) Check for an active app instance in the current project and stop it before re/publishing.
+* _hardware_tier_id:_ (Optional) Launch the app on the specified hardware tier ID.
+* _environment_id:_ (Optional) Launch the app with the specified environment ID.
+* _external_volume_mount_ids:_ (Optional) List of external volume mount IDs to attach to the app.
+* _commit_id:_ (Optional) Launch the app from a specific commit. Cannot be combined with `branch`.
+* _branch:_ (Optional) Launch the app from the tip of a specific branch. Cannot be combined with `commit_id`.
+* _app_id:_ (Optional) The ID of the app to publish. If omitted, the project's default app is used (or a new one is created if none exists).
 
-==== app_unpublish()
+[source,python]
+----
+# Publish from a specific branch
+d.app_publish(branch="my-feature-branch")
+
+# Publish from a specific commit
+d.app_publish(commit_id="abc123def456")
+
+# Publish a specific app by ID
+d.app_publish(app_id="aabbccddeeff001122334457")
+----
+
+NOTE: The parameters `unpublishRunningApps`, `hardwareTierId`, `environmentId`, `externalVolumeMountIds`, `commitId`, and `appId` are deprecated and will be removed in the next major version. Use the `snake_case` equivalents listed above.
+
+==== app_unpublish(app_id=None)
 
 Stop the running app in the project.
 
+* _app_id:_ (Optional) The ID of the app to stop. If omitted, the project's default app is used.
+
 === Jobs
 
 ==== job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None):
diff --git a/README.md b/README.md
index 3afd4f3e..4a1c4935 100644
--- a/README.md
+++ b/README.md
@@ -534,21 +534,56 @@ Retrieve a file from the Domino server in a project from its path and commit id.
 
 ## Apps
 
-### app_publish(unpublishRunningApps=True, hardwareTierId=None)
+### app_publish(unpublish_running_apps=True, hardware_tier_id=None, environment_id=None, external_volume_mount_ids=None, commit_id=None, branch=None, app_id=None)
 
 Publish an app within a project, or republish an existing app.
 
--   *unpublishRunningApps:* (Defaults to True) Check for an active app
-    instance in the current project and unpublish it before
+-   *unpublish_running_apps:* (Defaults to `True`) Check for an active
+    app instance in the current project and stop it before
     re/publishing.
 
--   *hardwareTierId:* (Optional) Launch the app on the specified
-    hardware tier.
+-   *hardware_tier_id:* (Optional) Launch the app on the specified
+    hardware tier ID.
 
-### app_unpublish()
+-   *environment_id:* (Optional) Launch the app with the specified
+    environment ID.
+
+-   *external_volume_mount_ids:* (Optional) List of external volume
+    mount IDs to attach to the app.
+
+-   *commit_id:* (Optional) Launch the app from a specific commit.
+    Cannot be combined with `branch`.
+
+-   *branch:* (Optional) Launch the app from the tip of a specific
+    branch. Cannot be combined with `commit_id`.
+
+-   *app_id:* (Optional) The ID of the app to publish. If omitted, the
+    project's default app is used (or a new one is created if none
+    exists).
+
+```python
+# Publish from a specific branch
+d.app_publish(branch="my-feature-branch")
+
+# Publish from a specific commit
+d.app_publish(commit_id="abc123def456")
+
+# Publish a specific app by ID
+d.app_publish(app_id="aabbccddeeff001122334457")
+```
+
+> **Note:** The parameters `unpublishRunningApps`, `hardwareTierId`,
+> `environmentId`, `externalVolumeMountIds`, `commitId`, and `appId`
+> are deprecated and will be removed in the next major version.
+> Use the `snake_case` equivalents listed above.
+
+### app_unpublish(app_id=None)
 
 Stop the running app in the project.
 
+-   *app_id:* (Optional) The ID of the app to stop. If omitted, the
+    project's default app is used.
+
 ## Jobs
 
 ### job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None, main_repo_git_ref=None):

From 9eda880fb20780d4d023c65f81a9d0363873f8c1 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Tue, 21 Apr 2026 19:51:22 +0100
Subject: [PATCH 14/20] Updated to correct package

---
 .github/workflows/ci.yml | 1 -
 .pre-commit-config.yaml  | 1 -
 2 files changed, 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cf0d64e0..ebc05acb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -59,7 +59,6 @@ jobs:
             types-redis \
             types-protobuf \
             types-frozendict \
-            types-typing-extensions \
             types-urllib3
 
       - name: mypy
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c5f7f1df..1a981b7a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -55,5 +55,4 @@ repos:
           - "types-redis"
           - "types-protobuf"
           - "types-frozendict"
-          - "types-typing-extensions"
           - "types-urllib3"

From 7fc10dddc80b4acdc14cc94a4a202668b45b11f3 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 20:57:48 +0100
Subject: [PATCH 15/20] #231 add branch parameter to job_start() for git-based
 projects

---
 domino/domino.py   | 18 ++++++++++++++++-
 tests/test_jobs.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/domino/domino.py b/domino/domino.py
index 17d93c07..0070b864 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -424,6 +424,7 @@ def job_start(  # noqa: C901
         external_volume_mounts: Optional[List[str]] = None,
         title: Optional[str] = None,
         main_repo_git_ref: Optional[dict] = None,
+        branch: Optional[str] = None,
     ) -> dict:
         """
         Starts a Domino Job via V4 API
@@ -494,9 +495,24 @@ def job_start(  # noqa: C901
                                                         "type": "branches",
                                                         "value": "my-feature-branch"
                                                     }
-                                                    Supported types: "branches", "tags"
+                                                    Supported types: "branches", "tags".
+                                                    Cannot be combined with branch.
+        :param branch:                              string (Optional)
+                                                    Convenience parameter. For git-based projects, launch the job
+                                                    from the tip of the specified branch. Cannot be combined with
+                                                    commit_id or main_repo_git_ref.
         :return: Returns created Job details (number, id etc)
         """
+        if branch and commit_id:
+            raise ValueError(
+                "Only one of branch or commit_id may be specified, not both."
+            )
+        if branch and main_repo_git_ref:
+            raise ValueError(
+                "Only one of branch or main_repo_git_ref may be specified, not both."
+            )
+        if branch:
+            main_repo_git_ref = {"type": "branches", "value": branch}
 
         def validate_on_demand_spark_cluster_properties(max_execution_slot_per_user):
             self.log.debug(
diff --git a/tests/test_jobs.py b/tests/test_jobs.py
index bbdd4d3c..6538b0f6 100644
--- a/tests/test_jobs.py
+++ b/tests/test_jobs.py
@@ -376,6 +376,55 @@ def test_job_start_sends_main_repo_git_ref(requests_mock, dummy_hostname):
     assert jobs_start_request.json()["mainRepoGitRef"] == git_ref
 
 
+@pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
+def test_job_start_branch_sets_main_repo_git_ref(requests_mock, dummy_hostname):
+    """
+    Confirm that the branch convenience parameter is translated to mainRepoGitRef
+    in the jobs/start request body.
+    """
+    requests_mock.get(
+        f"{dummy_hostname}/v4/jobs/{MOCK_JOB_ID}",
+        json=MOCK_JOB_RESPONSE_COMPLETED,
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    d.job_start_blocking(
+        command="foo.py", branch="my-feature-branch", poll_freq=1, max_poll_time=1
+    )
+
+    jobs_start_request = next(
+        req for req in requests_mock.request_history if req.path == "/v4/jobs/start"
+    )
+    assert jobs_start_request.json()["mainRepoGitRef"] == {
+        "type": "branches",
+        "value": "my-feature-branch",
+    }
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
+def test_job_start_raises_if_branch_and_commit_id_both_provided(dummy_hostname):
+    """
+    Confirm that providing both branch and commit_id raises a ValueError.
+    """
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    with pytest.raises(ValueError, match="Only one of branch or commit_id"):
+        d.job_start(command="foo.py", branch="my-branch", commit_id="abc123")
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
+def test_job_start_raises_if_branch_and_main_repo_git_ref_both_provided(dummy_hostname):
+    """
+    Confirm that providing both branch and main_repo_git_ref raises a ValueError.
+    """
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    with pytest.raises(ValueError, match="Only one of branch or main_repo_git_ref"):
+        d.job_start(
+            command="foo.py",
+            branch="my-branch",
+            main_repo_git_ref={"type": "branches", "value": "other-branch"},
+        )
+
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
 def test_job_status_ignores_RequestException_and_times_out(
     requests_mock, dummy_hostname

From 33d7b5cdd6dc08afadf45a778ef64d7bf1bdef84 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 21:04:40 +0100
Subject: [PATCH 16/20] #127 - make app_id a public property

---
 domino/domino.py   | 25 +++++++++++++------------
 tests/test_app.py  | 36 +++++++++++++++++++++++++++++++++++-
 tests/test_apps.py | 31 +++++++++++++++++++++++++++++--
 3 files changed, 77 insertions(+), 15 deletions(-)

diff --git a/domino/domino.py b/domino/domino.py
index 0070b864..ad072b0c 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -1145,7 +1145,7 @@ def app_publish(
             raise ValueError(
                 "Only one of commit_id or branch may be specified, not both."
             )
-        app_id = app_id or self._app_id
+        app_id = app_id or self.app_id
         if unpublish_running_apps:
             self.app_unpublish(app_id)
         if app_id is None:
@@ -1174,17 +1174,23 @@ def app_unpublish(self, app_id=None, **kwargs):
                 "appId is deprecated, use app_id", DeprecationWarning, stacklevel=2
             )
             app_id = kwargs.pop("appId")
-        app_id = app_id or self._app_id
+        app_id = app_id or self.app_id
         if app_id is None:
             return
-        status = self.__app_get_status(app_id)
+        status = self.app_get_status(app_id)
         self.log.debug(f"App {app_id} status={status}")
         if status and status != "Stopped" and status != "Failed":
             url = self._routes.app_stop(app_id)
             response = self.request_manager.post(url)
             return response
 
-    def __app_get_status(self, app_id) -> Optional[str]:
+    def app_get_status(self, app_id: str) -> Optional[str]:
+        """
+        Return the current status of an app, or None if the app does not exist.
+
+        :param app_id: The ID of the app to query.
+        :return: Status string (e.g. "Running", "Stopped", "Failed") or None.
+        """
         if app_id is None:
             return None
         url = self._routes.app_get(app_id)
@@ -2223,20 +2229,15 @@ def project_id(self):
             f"Project '{self._project_name}' not found for owner '{self._owner_username}'"
         )
 
-    # This will fetch app_id of app in current project
     @property
-    def _app_id(self):
+    def app_id(self) -> Optional[str]:
+        """Return the ID of the first app in the current project, or None if no app exists."""
         url = self._routes.app_list(self.project_id)
         response = self._get(url)
         if len(response) != 0:
             app = response[0]
         else:
             return None
-        key = "id"
-        if key in app.keys():
-            app_id = app[key]
-        else:
-            app_id = None
-        return app_id
+        return app.get("id", None)
 
     _csrf_no_check_header = {"Csrf-Token": "nocheck"}
diff --git a/tests/test_app.py b/tests/test_app.py
index f71af1a5..0f67dc92 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -17,7 +17,7 @@ def mock_app_publish_setup(requests_mock, dummy_hostname):
     """
     requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
 
-    # Mock app status lookup (GET) used by app_unpublish via __app_get_status
+    # Mock app status lookup (GET) used by app_unpublish via app_get_status
     requests_mock.get(
         f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}",
         json={"id": MOCK_APP_ID, "status": "Running"},
@@ -152,3 +152,37 @@ def test_app_publish_targets_specific_app_id(requests_mock, dummy_hostname):
         if req.path == f"/v4/modelproducts/{MOCK_APP_ID}/start"
     ]
     assert len(start_requests) == 1
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env")
+def test_app_get_status_returns_status(requests_mock, dummy_hostname):
+    """
+    Confirm that app_get_status() is publicly accessible and returns the
+    status string from the API response.
+    """
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    requests_mock.get(
+        f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}",
+        json={"id": MOCK_APP_ID, "status": "Running"},
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    status = d.app_get_status(MOCK_APP_ID)
+    assert status == "Running"
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env")
+def test_app_get_status_returns_none_when_status_missing(requests_mock, dummy_hostname):
+    """
+    Confirm that app_get_status() returns None when the API response
+    does not contain a status field.
+    """
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    requests_mock.get(
+        f"{dummy_hostname}/v4/modelproducts/{MOCK_APP_ID}",
+        json={"id": MOCK_APP_ID},
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    status = d.app_get_status(MOCK_APP_ID)
+    assert status is None
diff --git a/tests/test_apps.py b/tests/test_apps.py
index 67ce9088..bb19c678 100644
--- a/tests/test_apps.py
+++ b/tests/test_apps.py
@@ -30,7 +30,7 @@ def base_mocks(requests_mock, dummy_hostname):
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_app_publish_starts_existing_app(requests_mock, dummy_hostname):
-    # _app_id: returns existing app
+    # app_id: returns existing app
     requests_mock.get(
         f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}",
         json=[MOCK_APP],
@@ -52,7 +52,7 @@ def test_app_publish_starts_existing_app(requests_mock, dummy_hostname):
 
 @pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
 def test_app_publish_creates_app_when_none_exists(requests_mock, dummy_hostname):
-    # _app_id returns empty list — no app exists
+    # app_id returns empty list — no app exists
     requests_mock.get(
         f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}", json=[]
     )
@@ -174,3 +174,30 @@ def test_app_unpublish_does_nothing_when_app_already_stopped(
     d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
     result = d.app_unpublish()
     assert result is None
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
+def test_app_id_returns_id_when_app_exists(requests_mock, dummy_hostname):
+    """
+    Confirm that the public app_id property returns the ID of the first app
+    in the current project.
+    """
+    requests_mock.get(
+        f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}",
+        json=[MOCK_APP],
+    )
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    assert d.app_id == MOCK_APP_ID
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
+def test_app_id_returns_none_when_no_app_exists(requests_mock, dummy_hostname):
+    """
+    Confirm that the public app_id property returns None when the project
+    has no apps.
+    """
+    requests_mock.get(
+        f"{dummy_hostname}/v4/modelProducts?projectId={MOCK_PROJECT_ID}", json=[]
+    )
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    assert d.app_id is None

From ac2c07754b9ca07362f25124dc3fb457837f7cb6 Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 21:07:29 +0100
Subject: [PATCH 17/20] #174 - handle dict input in _validate_hardware_tier_id

---
 domino/domino.py   |  6 ++++--
 tests/test_jobs.py | 44 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/domino/domino.py b/domino/domino.py
index ad072b0c..65d6b1e5 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -5,7 +5,7 @@
 import re
 import time
 import warnings
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import polling2
 import requests
@@ -2104,7 +2104,9 @@ def _validate_environment_id(self, environment_id) -> bool:
             f"{environment_id} environment not found"
         )
 
-    def _validate_hardware_tier_id(self, hardware_tier_id: str) -> bool:
+    def _validate_hardware_tier_id(self, hardware_tier_id: Union[str, Dict]) -> bool:
+        if isinstance(hardware_tier_id, dict):
+            hardware_tier_id = hardware_tier_id.get("value", hardware_tier_id)
         self.log.debug(f"Validating hardware tier id: {hardware_tier_id}")
         for hardware_tier in self.hardware_tiers_list():
             if hardware_tier_id == hardware_tier["hardwareTier"]["id"]:
diff --git a/tests/test_jobs.py b/tests/test_jobs.py
index 6538b0f6..24985c05 100644
--- a/tests/test_jobs.py
+++ b/tests/test_jobs.py
@@ -425,6 +425,50 @@ def test_job_start_raises_if_branch_and_main_repo_git_ref_both_provided(dummy_ho
         )
 
 
+@pytest.mark.usefixtures("clear_token_file_from_env")
+def test_validate_hardware_tier_id_accepts_dict(requests_mock, dummy_hostname):
+    """
+    Confirm that _validate_hardware_tier_id extracts the string value from a dict
+    instead of raising HardwareTierNotFoundException (#174).
+
+    Some call paths (e.g. compute_cluster_properties["workerHardwareTierId"]) pass
+    a dict like {"value": "small-k8s"} rather than a plain string.
+    """
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    requests_mock.get(
+        f"{dummy_hostname}/v4/gateway/projects/findProjectByOwnerAndName"
+        "?ownerName=anyuser&projectName=anyproject",
+        json={"id": MOCK_PROJECT_ID},
+    )
+    requests_mock.get(
+        f"{dummy_hostname}/v4/projects/{MOCK_PROJECT_ID}/hardwareTiers",
+        json=[{"hardwareTier": {"id": "small-k8s", "name": "Small"}}],
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    assert d._validate_hardware_tier_id({"value": "small-k8s"}) is True
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env")
+def test_validate_hardware_tier_id_accepts_string(requests_mock, dummy_hostname):
+    """
+    Confirm that _validate_hardware_tier_id still works with a plain string ID.
+    """
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    requests_mock.get(
+        f"{dummy_hostname}/v4/gateway/projects/findProjectByOwnerAndName"
+        "?ownerName=anyuser&projectName=anyproject",
+        json={"id": MOCK_PROJECT_ID},
+    )
+    requests_mock.get(
+        f"{dummy_hostname}/v4/projects/{MOCK_PROJECT_ID}/hardwareTiers",
+        json=[{"hardwareTier": {"id": "small-k8s", "name": "Small"}}],
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    assert d._validate_hardware_tier_id("small-k8s") is True
+
+
 @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup")
 def test_job_status_ignores_RequestException_and_times_out(
     requests_mock, dummy_hostname

From 5952281c1ddbc882c6c0e30bab0f2296fdff548d Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 21:09:19 +0100
Subject: [PATCH 18/20] #24 / #31 - add files_download() convenience method

---
 domino/domino.py    | 12 +++++++++
 tests/test_files.py | 61 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)
 create mode 100644 tests/test_files.py

diff --git a/domino/domino.py b/domino/domino.py
index 65d6b1e5..c8a70795 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -891,6 +891,18 @@ def blobs_get_v2(self, path, commit_id, project_id):
         url = self._routes.blobs_get_v2(path, commit_id, project_id)
         return self.request_manager.get_raw(url)
 
+    def files_download(self, path: str, commit_id: Optional[str] = None):
+        """
+        Download a file from the project by path.
+
+        :param path: Path to the file within the project (e.g. "/README.md").
+        :param commit_id: The commit to download from. Defaults to the latest commit.
+        :return: Raw file content (urllib3 response stream).
+        """
+        if commit_id is None:
+            commit_id = self.commits_list()[0]["id"]
+        return self.blobs_get_v2(path, commit_id, self.project_id)
+
     def fork_project(self, target_name):
         url = self._routes.fork_project(self.project_id)
         request = {"name": target_name}
diff --git a/tests/test_files.py b/tests/test_files.py
new file mode 100644
index 00000000..8c2c20f6
--- /dev/null
+++ b/tests/test_files.py
@@ -0,0 +1,61 @@
+"""
+Unit tests for files_download() convenience method.
+All tests use requests_mock — no live Domino deployment required.
+"""
+
+import pytest
+
+from domino import Domino
+
+MOCK_PROJECT_ID = "aabbccddeeff001122334455"
+MOCK_COMMIT_ID = "aabbcc112233"
+FILE_CONTENT = b"hello world"
+
+
+@pytest.fixture
+def base_mocks(requests_mock, dummy_hostname):
+    requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"})
+    requests_mock.get(
+        f"{dummy_hostname}/v4/gateway/projects/findProjectByOwnerAndName"
+        "?ownerName=anyuser&projectName=anyproject",
+        json={"id": MOCK_PROJECT_ID},
+    )
+    yield
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
+def test_files_download_with_explicit_commit_id(requests_mock, dummy_hostname):
+    """
+    Confirm that files_download() uses the provided commit_id and does not
+    call commits_list (#24, #31).
+    """
+    requests_mock.get(
+        f"{dummy_hostname}/api/projects/v1/projects/{MOCK_PROJECT_ID}"
+        f"/files/{MOCK_COMMIT_ID}//README.md/content",
+        content=FILE_CONTENT,
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    result = d.files_download("/README.md", commit_id=MOCK_COMMIT_ID)
+    assert result.read() == FILE_CONTENT
+
+
+@pytest.mark.usefixtures("clear_token_file_from_env", "base_mocks")
+def test_files_download_defaults_to_latest_commit(requests_mock, dummy_hostname):
+    """
+    Confirm that files_download() fetches the latest commit when commit_id
+    is omitted (#24, #31).
+    """
+    requests_mock.get(
+        f"{dummy_hostname}/v1/projects/anyuser/anyproject/commits",
+        json=[{"id": MOCK_COMMIT_ID}, {"id": "oldercommit"}],
+    )
+    requests_mock.get(
+        f"{dummy_hostname}/api/projects/v1/projects/{MOCK_PROJECT_ID}"
+        f"/files/{MOCK_COMMIT_ID}//README.md/content",
+        content=FILE_CONTENT,
+    )
+
+    d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever")
+    result = d.files_download("/README.md")
+    assert result.read() == FILE_CONTENT

From 86d8ba54ba442966e34d73643c6a2c6eb5fa62db Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 21:11:22 +0100
Subject: [PATCH 19/20] #122 - document runs_start vs jobs_start

---
 README.adoc      | 17 ++++++++++++++++-
 README.md        | 20 +++++++++++++++++++-
 domino/domino.py |  8 +++++++-
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/README.adoc b/README.adoc
index e4a54b3c..4361e15f 100644
--- a/README.adoc
+++ b/README.adoc
@@ -426,6 +426,19 @@ Remove a tag from a project.
 
 === Executions
 
+NOTE: *`runs_start` vs `job_start` — which should I use?* +
+The SDK exposes two ways to start an execution. `job_start` uses the v4 Jobs API and is recommended for all new work. `runs_start` uses the legacy v1 API and is retained for backwards compatibility only.
+[cols="1,2,2", options="header"]
+|===
+| | `runs_start` / `runs_start_blocking` | `job_start` / `job_start_blocking`
+| *API version* | v1 (legacy) | v4 (current)
+| *Command format* | List of strings: `["main.py", "arg1"]` | Single string: `"main.py arg1"`
+| *Compute clusters* | Not supported | Spark, Ray, Dask, MPI via `compute_cluster_properties`
+| *Git ref targeting* | Commit ID only | Commit ID, branch, or `main_repo_git_ref` dict
+| *External volumes* | Not supported | Supported via `external_volume_mounts`
+| *Recommended for* | Legacy scripts | All new work
+|===
+
 See these code example files:
 
 * {python-domino-repo}/blob/Release-{latest-version}/examples/start_run_and_check_status.py[`start_run_and_check_status.py`^]
@@ -559,9 +572,11 @@ Stop the running app in the project.
 
 === Jobs
 
+NOTE: Prefer `job_start` over `runs_start` for all new work. See the <<executions,Executions>> section for a full comparison.
+
 ==== job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None):
 
-Start a new job (execution) in the project.
+Start a new job (execution) in the project using the v4 Jobs API.
 
 * _command (string):_ Command to execute in Job.
 For example: `domino.job_start(command="main.py arg1 arg2")`
diff --git a/README.md b/README.md
index 4a1c4935..4e15c7c6 100644
--- a/README.md
+++ b/README.md
@@ -404,6 +404,22 @@ Remove a tag from a project.
 
 ## Executions
 
+> **`runs_start` vs `job_start` — which should I use?**
+>
+> The SDK exposes two ways to start an execution:
+>
+> | | `runs_start` / `runs_start_blocking` | `job_start` / `job_start_blocking` |
+> |---|---|---|
+> | **API version** | v1 (legacy) | v4 (current) |
+> | **Command format** | List of strings: `["main.py", "arg1"]` | Single string: `"main.py arg1"` |
+> | **Hardware tier** | By name (`tier`) | By name (`hardware_tier_name`) or ID (`hardware_tier_id`) |
+> | **Compute clusters** | Not supported | Spark, Ray, Dask, MPI via `compute_cluster_properties` |
+> | **Git ref targeting** | By commit ID only | By commit ID, branch, or raw `main_repo_git_ref` dict |
+> | **External volumes** | Not supported | Supported via `external_volume_mounts` |
+> | **Recommended for** | Legacy scripts and simple use cases | All new work |
+>
+> Use `job_start` for all new development. `runs_start` is retained for backwards compatibility.
+
 See these code example files:
 
 -   [`start_run_and_check_status.py`](https://github.com/dominodatalab/python-domino/blob/Release-2.1.0/examples/start_run_and_check_status.py)
@@ -586,9 +602,11 @@ Stop the running app in the project.
 
 ## Jobs
 
+> **Prefer `job_start` over `runs_start` for all new work.** See the [Executions](#executions) section for a full comparison.
+
 ### job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None, main_repo_git_ref=None):
 
-Start a new job (execution) in the project.
+Start a new job (execution) in the project using the v4 Jobs API.
 
 -   *command (string):* Command to execute in Job. For example:
     `domino.job_start(command="main.py arg1 arg2")`
diff --git a/domino/domino.py b/domino/domino.py
index c8a70795..9e763099 100644
--- a/domino/domino.py
+++ b/domino/domino.py
@@ -136,6 +136,10 @@ def runs_start(
         publish_api_endpoint=None,
         **kwargs,
     ):
+        """
+        Start a run via the legacy v1 Runs API. For new work, prefer job_start() which uses
+        the v4 Jobs API and supports compute clusters, external volumes, and branch targeting.
+        """
         if "isDirect" in kwargs:
             warnings.warn(
                 "isDirect is deprecated, use is_direct",
@@ -427,7 +431,9 @@ def job_start(  # noqa: C901
         branch: Optional[str] = None,
     ) -> dict:
         """
-        Starts a Domino Job via V4 API
+        Start a Domino Job via the v4 Jobs API. Preferred over runs_start() for all new work —
+        supports hardware tiers, compute clusters (Spark/Ray/Dask/MPI), external volumes, and
+        git ref targeting by branch or commit.
         :param command:                             string
                                                     Command to execute in Job
                                                     >> domino.job_start(command="main.py arg1 arg2")

From 067a55a7cf92c7d8e54bc96a557128e861c299ec Mon Sep 17 00:00:00 2001
From: Blake Moore 
Date: Wed, 22 Apr 2026 21:13:36 +0100
Subject: [PATCH 20/20] Update README with the changes

---
 README.adoc | 44 +++++++++++++++++++++++++++++++++++++++++---
 README.md   | 46 +++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 84 insertions(+), 6 deletions(-)

diff --git a/README.adoc b/README.adoc
index 4361e15f..b8f06ce1 100644
--- a/README.adoc
+++ b/README.adoc
@@ -536,6 +536,21 @@ Retrieve a file from the Domino server in a project from its path and commit id.
 * _commit_id:_ ID of the commit to retrieve the file from.
 * _project_id:_ ID of the project to retrieve the file from.
 
+==== files_download(path, commit_id=None)
+
+Convenience wrapper around `blobs_get_v2` that downloads a file by path without
+needing to look up a commit ID first.
+
+* _path (string):_ Path to the file within the project, e.g. `"/README.md"`.
+* _commit_id (string):_ (Optional) The commit to download from. Defaults to the latest commit.
+
+Returns a raw file content stream (urllib3 response).
+
+[source,python]
+----
+content = d.files_download("/results/output.csv").read()
+----
+
 === Apps
 
 ==== app_publish(unpublish_running_apps=True, hardware_tier_id=None, environment_id=None, external_volume_mount_ids=None, commit_id=None, branch=None, app_id=None)
@@ -570,18 +585,38 @@ Stop the running app in the project.
 
 * _app_id:_ (Optional) The ID of the app to stop. If omitted, the project's default app is used.
 
+==== app_get_status(app_id)
+
+Return the current status of an app.
+
+* _app_id (string):_ The ID of the app to query.
+
+Returns the status string (e.g. `"Running"`, `"Stopped"`, `"Failed"`), or `None` if the app does not exist.
+
+==== app_id
+
+Read-only property. Returns the ID of the first app in the current project, or `None` if no app exists.
+
+[source,python]
+----
+print(d.app_id)  # e.g. "aabbccddeeff001122334457"
+----
+
 === Jobs
 
 NOTE: Prefer `job_start` over `runs_start` for all new work. See the <<executions,Executions>> section for a full comparison.
 
-==== job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None):
+==== job_start(command, commit_id=None, branch=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None, main_repo_git_ref=None):
 
 Start a new job (execution) in the project using the v4 Jobs API.
 
 * _command (string):_ Command to execute in Job.
 For example: `domino.job_start(command="main.py arg1 arg2")`
-* _commit_id (string):_ (Optional) The `commitId` to launch from.
-If not provided, the job launches from the latest commit.
+* _commit_id (string):_ (Optional) The commit ID to launch from.
+If not provided, the job launches from the latest commit. Mutually exclusive with `branch`.
+* _branch (string):_ (Optional) The branch name to launch from.
+If not provided, the job launches from the latest commit on the default branch.
+Mutually exclusive with `commit_id` and `main_repo_git_ref`.
 * _hardware_tier_name (string):_ (Optional) The hardware tier NAME to launch job in.
 If not provided, the project's default tier is used.
 * _environment_id (string):_ (Optional) The environment ID with which to launch the job.
@@ -629,6 +664,9 @@ If `on_demand_spark_cluster_properties` and `compute_cluster_properties` are bot
 * _external_volume_mounts (List[string]):_ (Optional) External volume mount IDs to mount to execution.
 If not provided, the job launches with no external volumes mounted.
 * _title (string):_ (Optional) Title for Job.
+* _main_repo_git_ref (dict):_ (Optional) Raw git ref dict for advanced use cases.
+For example: `{"type": "branches", "value": "my-feature-branch"}` or `{"type": "tags", "value": "v1.2.3"}`.
+Mutually exclusive with `branch`.
 
 ==== job_stop(job_id, commit_results=True):
 
diff --git a/README.md b/README.md
index 4e15c7c6..42051fe2 100644
--- a/README.md
+++ b/README.md
@@ -548,6 +548,22 @@ Retrieve a file from the Domino server in a project from its path and commit id.
 -   *commit_id:* ID of the commit to retrieve the file from.
 -   *project_id:* ID of the project to retrieve the file from.
 
+### files_download(path, commit_id=None)
+
+Convenience wrapper around `blobs_get_v2` that downloads a file by path without
+needing to look up a commit ID first.
+
+-   *path (string):* Path to the file within the project, e.g. `"/README.md"`.
+
+-   *commit_id (string):* (Optional) The commit to download from. Defaults to
+    the latest commit in the project.
+
+Returns a raw file content stream (urllib3 response). Example:
+
+```python
+content = d.files_download("/results/output.csv").read()
+```
+
 ## Apps
 
 ### app_publish(unpublish_running_apps=True, hardware_tier_id=None, environment_id=None, external_volume_mount_ids=None, commit_id=None, branch=None, app_id=None)
@@ -600,19 +616,43 @@ Stop the running app in the project.
 -   *app_id:* (Optional) The ID of the app to stop. If omitted, the
     project's default app is used.
 
+### app_get_status(app_id)
+
+Return the current status of an app.
+
+-   *app_id (string):* The ID of the app to query.
+
+Returns the status string (e.g. `"Running"`, `"Stopped"`, `"Failed"`), or
+`None` if the app does not exist.
+
+### app_id
+
+Read-only property. Returns the ID of the first app in the current project,
+or `None` if no app exists. Useful when you need the app ID to pass to
+`app_get_status()` or `app_publish()`.
+
+```python
+print(d.app_id)  # e.g. "aabbccddeeff001122334457"
+```
+
 ## Jobs
 
 > **Prefer `job_start` over `runs_start` for all new work.** See the [Executions](#executions) section for a full comparison.
 
-### job_start(command, commit_id=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None, main_repo_git_ref=None):
+### job_start(command, commit_id=None, branch=None, hardware_tier_name=None, environment_id=None, on_demand_spark_cluster_properties=None, compute_cluster_properties=None, external_volume_mounts=None, title=None, main_repo_git_ref=None):
 
 Start a new job (execution) in the project using the v4 Jobs API.
 
 -   *command (string):* Command to execute in Job. For example:
     `domino.job_start(command="main.py arg1 arg2")`
 
--   *commit_id (string):* (Optional) The `commitId` to launch from. If
-    not provided, the job launches from the latest commit.
+-   *commit_id (string):* (Optional) The commit ID to launch from. If
+    not provided, the job launches from the latest commit. Mutually
+    exclusive with `branch`.
+
+-   *branch (string):* (Optional) The branch name to launch from. If
+    not provided, the job launches from the latest commit on the default
+    branch. Mutually exclusive with `commit_id` and `main_repo_git_ref`.
 
 -   *hardware_tier_name (string):* (Optional) The hardware tier NAME
     to launch job in. If not provided, the project’s default tier is