diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..a1e92dc --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,72 @@ +name: CI + +on: + push: + branches: [main, master, develop] + pull_request: + branches: [main, master, develop] + workflow_dispatch: # Allow manual triggering + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install ruff mypy + pip install -e ".[dev]" + - name: Run Ruff linter + run: ruff check src/ + - name: Run Ruff formatter check + run: ruff format --check src/ + - name: Run MyPy + run: mypy src/omophub + + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev]" + - name: Run unit tests with coverage + run: | + pytest tests/unit --cov=omophub --cov-report=xml --cov-report=term-missing + - name: Upload coverage to Codecov + if: matrix.python-version == '3.12' + uses: codecov/codecov-action@v5 + with: + files: ./coverage.xml + fail_ci_if_error: false + token: ${{ secrets.CODECOV_TOKEN }} + + integration: + # Only run on push to main/develop, not on PRs (saves ~7-8 min per PR) + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev]" + - name: Run integration tests + env: + TEST_API_KEY: ${{ secrets.TEST_API_KEY }} + run: | + pytest tests/integration -v diff --git 
a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..37cf58e --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,47 @@ +name: Publish to PyPI + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 # Required for hatch-vcs to get version from tags + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install build tools + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Upload distributions + uses: actions/upload-artifact@v5 + with: + name: release-dists + path: dist/ + + publish: + runs-on: ubuntu-latest + needs: build + environment: + name: pypi + url: https://pypi.org/project/omophub/ + permissions: + id-token: write # Required for trusted publishing + steps: + - name: Download distributions + uses: actions/download-artifact@v5 + with: + name: release-dists + path: dist/ + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a7746d..dd305b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,118 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.6.0] - 2026-04-10 + +### Added + +- **FHIR-to-OMOP Concept Resolver** (`client.fhir`): Translate FHIR coded values into OMOP standard concepts, CDM target tables, and optional Phoebe recommendations in a single API call. + - `resolve()`: Resolve a single FHIR `Coding` (system URI + code) or text-only input via semantic search fallback. Returns the standard concept, target CDM table, domain alignment check, and optional mapping quality signal. 
+ - `resolve_batch()`: Batch-resolve up to 100 FHIR codings per request with inline per-item error reporting. Failed items do not fail the batch. + - `resolve_codeable_concept()`: Resolve a FHIR `CodeableConcept` with multiple codings. Automatically picks the best match per OHDSI vocabulary preference (SNOMED > RxNorm > LOINC > CVX > ICD-10). Falls back to the `text` field via semantic search when no coding resolves. +- New TypedDict types for FHIR resolver: `FhirResolveResult`, `FhirResolution`, `FhirBatchResult`, `FhirBatchSummary`, `FhirCodeableConceptResult`, `ResolvedConcept`, `RecommendedConceptOutput`. +- Both sync (`OMOPHub`) and async (`AsyncOMOPHub`) clients support FHIR resolver methods via `client.fhir.*`. + +### Changed + +- **Extracted shared response parsing** (`_request.py`): The duplicated JSON decode / error-handling / rate-limit-retry logic across `Request._parse_response`, `Request._parse_response_raw`, `AsyncRequest._parse_response`, and `AsyncRequest._parse_response_raw` (4 copies of ~50 lines each) is now a single `_parse_and_raise()` module-level function. All four methods delegate to it, eliminating the risk of divergence bugs. +- **Fixed `paginate_async` signature** (`_pagination.py`): The type hint now correctly declares `Callable[[int, int], Awaitable[tuple[...]]]` instead of `Callable[[int, int], tuple[...]]`, and the runtime `hasattr(__await__)` duck-typing hack has been replaced with a clean `await`. +- **`AsyncSearch.semantic_iter`** now delegates to `paginate_async` instead of manually reimplementing the pagination loop, matching the sync `semantic_iter` which already uses `paginate_sync`. + +### Fixed + +- Python prerequisite in CONTRIBUTING.md corrected from `3.9+` to `3.10+` (matching `pyproject.toml`). +- `__all__` in `types/__init__.py` sorted per RUF022. 
+ +## [1.5.1] - 2026-04-08 + +### Fixed + +- **Rate-limit handling**: HTTP client now respects the `Retry-After` header on `429 Too Many Requests` responses and applies exponential backoff with jitter on retries. Previous versions retried only on `502/503/504` with a fixed `2^attempt * 0.5s` schedule and did not back off on `429` at all, so a client that hit the server's rate limit at high volume could burn through thousands of failed requests in a tight loop. The client now honors `Retry-After`, uses exponential backoff with jitter, respects the configured `max_retries`, and caps backoff at 30 seconds. +- Updated `examples/search_concepts.py` to reflect current API. + +## [1.5.0] - 2026-03-26 + +### Added + +- **Bulk lexical search** (`search.bulk_basic()`): Execute up to 50 keyword searches in a single API call. Supports shared defaults for vocabulary, domain, and other filters. Each search is identified by a unique `search_id` for result matching. Maps to `POST /v1/search/bulk`. +- **Bulk semantic search** (`search.bulk_semantic()`): Execute up to 25 natural-language searches using neural embeddings in a single call. Supports per-search similarity thresholds and shared defaults. Includes query enhancement data (abbreviation expansion, misspelling correction). Maps to `POST /v1/search/semantic-bulk`. +- New TypedDict types for bulk search: `BulkSearchInput`, `BulkSearchDefaults`, `BulkSearchResponse`, `BulkSearchResultItem`, `BulkSemanticSearchInput`, `BulkSemanticSearchDefaults`, `BulkSemanticSearchResponse`, `BulkSemanticSearchResultItem`, `QueryEnhancement`. +- Both sync (`OMOPHub`) and async (`AsyncOMOPHub`) clients support bulk search methods. + +### Changed + +- Updated `__all__` exports to alphabetical order (ruff RUF022 compliance). +- `BulkSearchInput` and `BulkSemanticSearchInput` now use `Required[str]` for `search_id` and `query` fields for proper type checking. 
+ +## [1.4.1] - 2026-02-28 + +### Fixed + +- User-Agent header now reports actual SDK version (e.g., `OMOPHub-SDK-Python/1.4.1`) instead of hardcoded `0.1.0`. Version is resolved at runtime via `importlib.metadata`. + +## [1.4.0] - 2026-02-23 + +### Added + +- **Semantic search** (`search.semantic()`, `search.semantic_iter()`): Natural language concept search using neural embeddings. Search for clinical intent like "high blood sugar levels" to find diabetes-related concepts. Supports filtering by vocabulary, domain, standard concept, concept class, and minimum similarity threshold. `semantic_iter()` provides automatic pagination. +- **Similarity search** (`search.similar()`): Find concepts similar to a reference concept ID, concept name, or natural language query. Three algorithm options: `'semantic'` (neural embeddings), `'lexical'` (string matching), and `'hybrid'` (combined). Configurable similarity threshold with optional detailed scores and explanations. + +## [1.3.1] - 2026-01-24 + +### Fixed + +- Fixed `search.basic_iter()` pagination bug that caused only the first page of results to be returned. The iterator now correctly fetches all pages when iterating through search results. + +### Changed + +- Added `get_raw()` method to internal request classes for retrieving full API responses with pagination metadata. +- Expanded `search.basic_iter()` method signature to explicitly list all filter parameters instead of using `**kwargs`. 
+
+## [1.3.0] - 2026-01-06
+
+### Changed
+
+**Parameter Renames (for API consistency):**
+- `search.autocomplete()`: `max_suggestions` → `page_size`
+- `concepts.suggest()`: `vocabulary` → `vocabulary_ids`, `domain` → `domain_ids`, `limit` → `page_size`
+- `concepts.related()`: `relatedness_types` → `relationship_types`
+- `concepts.relationships()`: `relationship_type` → `relationship_ids`
+- `relationships.get()`: `relationship_type` → `relationship_ids`, `target_vocabulary` → `vocabulary_ids`
+- `hierarchy.ancestors()`: `vocabulary_id` → `vocabulary_ids`, `include_deprecated` → `include_invalid`
+- `hierarchy.descendants()`: `vocabulary_id` → `vocabulary_ids`, `include_deprecated` → `include_invalid`
+
+**Simplified APIs (removed parameters):**
+- `vocabularies.get()`: Removed `include_stats`, `include_domains` (use `stats()` method instead)
+- `vocabularies.domains()`: Removed pagination parameters, now returns all domains
+- `domains.list()`: Simplified to single `include_stats` parameter
+- `domains.concepts()`: Removed `concept_class_ids`, added `include_invalid`
+- `mappings.get()`: Simplified to `target_vocabulary`, `include_invalid`, `vocab_release`
+- `relationships.types()`: Removed all filtering parameters
+
+**Default Changes:**
+- `vocabularies.list()`: Default `page_size` changed from 100 to 20
+- `concepts.batch()`: Default `standard_only` changed from `False` to `True`
+
+### Added
+
+- `vocabularies.domain_stats(vocabulary_id, domain_id)` - Get statistics for a specific domain within a vocabulary
+- `vocabularies.concept_classes()` - Get all concept classes
+- `hierarchy.get(concept_id)` - Get complete hierarchy (ancestors and descendants) in one call
+- `vocab_release` parameter to `concepts.get()`, `concepts.get_by_code()`, `mappings.get()`, `mappings.map()`
+- `include_hierarchy` parameter to `concepts.get()` and `concepts.get_by_code()`
+- Pagination support to `concepts.suggest()`
+- `domain_ids`, `standard_only`, `include_reverse` 
parameters to `relationships.get()` + +## [1.2.0] - 2025-12-09 + +### Added + +- `include_synonyms` and `include_relationships` parameters to `concepts.get_by_code()` method for retrieving concept synonyms and relationships in a single request. + +### Changed + +- User-Agent header updated to `OMOPHub-SDK-Python/{version}`. + ## [0.1.0] - 2025-12-01 ### Added @@ -27,5 +139,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Full type hints and PEP 561 compliance - HTTP/2 support via httpx -[Unreleased]: https://github.com/omopHub/omophub-python/compare/v0.1.0...HEAD +[Unreleased]: https://github.com/omopHub/omophub-python/compare/v1.6.0...HEAD +[1.6.0]: https://github.com/omopHub/omophub-python/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/omopHub/omophub-python/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/omopHub/omophub-python/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/omopHub/omophub-python/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/omopHub/omophub-python/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/omopHub/omophub-python/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/omopHub/omophub-python/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/omopHub/omophub-python/compare/v0.1.0...v1.2.0 [0.1.0]: https://github.com/omopHub/omophub-python/releases/tag/v0.1.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..4957456 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,160 @@ +# Contributing to OMOPHub Python SDK + +First off, thank you for considering contributing to OMOPHub! + +## How Can I Contribute? + +### Reporting Bugs + +Before creating bug reports, please check the [existing issues](https://github.com/OMOPHub/omophub-python/issues) to avoid duplicates. 
+ +When creating a bug report, please include: + +- **Python version** (`python --version`) +- **SDK version** (`pip show omophub`) +- **Operating system** +- **Minimal code example** that reproduces the issue +- **Full error traceback** +- **Expected vs actual behavior** + +### Suggesting Features + +Feature requests are welcome! Please open an issue with: + +- Clear description of the feature +- Use case: why would this be useful? +- Possible implementation approach (optional) + +### Pull Requests + +1. **Fork the repository** and create your branch from `main` +2. **Install development dependencies:** + ```bash + git clone https://github.com/YOUR_USERNAME/omophub-python.git + cd omophub-python + pip install -e ".[dev]" + ``` +3. **Make your changes** with clear, descriptive commits +4. **Add tests** for new functionality +5. **Run the test suite:** + ```bash + pytest + ``` +6. **Ensure code style compliance:** + ```bash + ruff check . + ruff format . + mypy src/ + ``` +7. **Update documentation** if needed +8. **Submit a pull request** with a clear description + +## Development Setup + +### Prerequisites + +- Python 3.10+ +- pip + +### Installation + +```bash +# Clone your fork +git clone https://github.com/YOUR_USERNAME/omophub-python.git +cd omophub-python + +# Create virtual environment +python -m venv venv +source venv/bin/activate # On Windows: venv\Scripts\activate + +# Install in development mode +pip install -e ".[dev]" +``` + +### Running Tests + +```bash +# Run all tests +pytest + +# Run with coverage +pytest --cov=omophub --cov-report=html + +# Run specific test file +pytest tests/test_concepts.py + +# Run tests matching a pattern +pytest -k "test_search" +``` + +### Code Style + +We use: +- **Ruff** for linting and formatting +- **mypy** for type checking + +```bash +# Check linting +ruff check . + +# Auto-format code +ruff format . 
+ +# Type checking +mypy src/ +``` + +## Project Structure + +``` +omophub-python/ +├── src/omophub/ +│ ├── __init__.py # Public API exports +│ ├── client.py # OMOPHub client class +│ ├── resources/ # API resource classes +│ │ ├── concepts.py +│ │ ├── search.py +│ │ ├── hierarchy.py +│ │ └── ... +│ ├── types.py # TypedDict definitions +│ └── exceptions.py # Custom exceptions +├── tests/ +│ ├── test_concepts.py +│ ├── test_search.py +│ └── ... +├── examples/ +│ └── ... +└── pyproject.toml +``` + +## Commit Messages + +We follow [Conventional Commits](https://www.conventionalcommits.org/): + +- `feat:` New feature +- `fix:` Bug fix +- `docs:` Documentation changes +- `test:` Adding or updating tests +- `refactor:` Code refactoring +- `chore:` Maintenance tasks + +Examples: +``` +feat: add semantic search endpoint +fix: handle rate limit errors correctly +docs: update README with new examples +test: add tests for batch concept lookup +``` + +## Questions? + +- Open a [GitHub Discussion](https://github.com/OMOPHub/omophub-python/discussions) +- Email: support@omophub.com + +## License + +By contributing, you agree that your contributions will be licensed under the MIT License. + +--- + +Thank you for helping make OMOPHub better! diff --git a/README.md b/README.md index 9733ec1..339bcae 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,32 @@ # OMOPHub Python SDK -[![PyPI version](https://badge.fury.io/py/omophub.svg)](https://badge.fury.io/py/omophub) +**Query millions standardized medical concepts via simple Python API** + +Access SNOMED CT, ICD-10, RxNorm, LOINC, and 90+ OHDSI ATHENA vocabularies without downloading, installing, or maintaining local databases. 
+ +[![PyPI version](https://badge.fury.io/py/omophub.svg)](https://pypi.org/project/omophub/) [![Python Versions](https://img.shields.io/pypi/pyversions/omophub.svg)](https://pypi.org/project/omophub/) -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Codecov](https://codecov.io/gh/omopHub/omophub-python/branch/main/graph/badge.svg)](https://app.codecov.io/gh/omopHub/omophub-python?branch=main) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +![Downloads](https://img.shields.io/pypi/dm/omophub) + +**[Documentation](https://docs.omophub.com/sdks/python/overview)** · +**[API Reference](https://docs.omophub.com/api-reference)** · +**[Examples](https://github.com/omopHub/omophub-python/tree/main/examples)** + +--- + +## Why OMOPHub? + +Working with OHDSI ATHENA vocabularies traditionally requires downloading multi-gigabyte files, setting up a database instance, and writing complex SQL queries. **OMOPHub eliminates this friction.** -The official Python SDK for [OMOPHub](https://omophub.com) - a medical vocabulary API providing access to OHDSI ATHENA standardized vocabularies including SNOMED CT, ICD-10, RxNorm, LOINC, and 90+ other medical terminologies. 
+| Traditional Approach | With OMOPHub | +|---------------------|--------------| +| Download 5GB+ ATHENA vocabulary files | `pip install omophub` | +| Set up and maintain database | One API call | +| Write complex SQL with multiple JOINs | Simple Python methods | +| Manually update vocabularies quarterly | Always current data | +| Local infrastructure required | Works anywhere Python runs | ## Installation @@ -15,188 +37,221 @@ pip install omophub ## Quick Start ```python -import omophub +from omophub import OMOPHub -# Initialize the client -client = omophub.OMOPHub(api_key="oh_xxxxxxxxx") +# Initialize client (uses OMOPHUB_API_KEY env variable, or pass api_key="...") +client = OMOPHub() # Get a concept by ID concept = client.concepts.get(201826) print(concept["concept_name"]) # "Type 2 diabetes mellitus" -# Search for concepts -results = client.search.basic("diabetes", vocabulary_ids=["SNOMED", "ICD10CM"]) -for concept in results["concepts"]: - print(f"{concept['concept_id']}: {concept['concept_name']}") +# Search for concepts across vocabularies +results = client.search.basic("metformin", vocabulary_ids=["RxNorm"], domain_ids=["Drug"]) +for c in results["concepts"]: + print(f"{c['concept_id']}: {c['concept_name']}") -# Get concept ancestors -ancestors = client.hierarchy.ancestors(201826, max_levels=3) +# Map ICD-10 code to SNOMED +mappings = client.mappings.get_by_code("ICD10CM", "E11.9", target_vocabulary="SNOMED") -# Map concepts between vocabularies -mappings = client.mappings.get(201826, target_vocabularies=["ICD10CM"]) +# Navigate concept hierarchy +ancestors = client.hierarchy.ancestors(201826, max_levels=3) ``` -## Async Usage +## FHIR-to-OMOP Resolution -```python -import omophub -import asyncio +Resolve FHIR coded values to OMOP standard concepts in one call: -async def main(): - async with omophub.AsyncOMOPHub(api_key="oh_xxx") as client: - concept = await client.concepts.get(201826) - print(concept["concept_name"]) +```python +# Single FHIR Coding → 
OMOP concept + CDM target table +result = client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + resource_type="Condition", +) +print(result["resolution"]["target_table"]) # "condition_occurrence" +print(result["resolution"]["mapping_type"]) # "direct" -asyncio.run(main()) +# ICD-10-CM → traverses "Maps to" automatically +result = client.fhir.resolve( + system="http://hl7.org/fhir/sid/icd-10-cm", + code="E11.9", +) +print(result["resolution"]["standard_concept"]["vocabulary_id"]) # "SNOMED" + +# Batch resolve up to 100 codings +batch = client.fhir.resolve_batch([ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://loinc.org", "code": "2339-0"}, + {"system": "http://www.nlm.nih.gov/research/umls/rxnorm", "code": "197696"}, +]) +print(f"Resolved {batch['summary']['resolved']}/{batch['summary']['total']}") + +# CodeableConcept with vocabulary preference (SNOMED wins over ICD-10) +result = client.fhir.resolve_codeable_concept( + coding=[ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + ], + resource_type="Condition", +) +print(result["best_match"]["resolution"]["source_concept"]["vocabulary_id"]) # "SNOMED" ``` -## Configuration +## Semantic Search -### API Key - -Set your API key in one of three ways: +Use natural language queries to find concepts using neural embeddings: ```python -# 1. Pass directly to client -client = omophub.OMOPHub(api_key="oh_xxxxxxxxx") +# Natural language search - understands clinical intent +results = client.search.semantic("high blood sugar levels") +for r in results["results"]: + print(f"{r['concept_name']} (similarity: {r['similarity_score']:.2f})") -# 2. 
Set environment variable -# export OMOPHUB_API_KEY=oh_xxxxxxxxx -client = omophub.OMOPHub() +# Filter by vocabulary and set minimum similarity threshold +results = client.search.semantic( + "heart attack", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + threshold=0.5 +) -# 3. Set module-level variable -import omophub -omophub.api_key = "oh_xxxxxxxxx" -client = omophub.OMOPHub() +# Iterate through all results with auto-pagination +for result in client.search.semantic_iter("chronic kidney disease", page_size=50): + print(f"{result['concept_id']}: {result['concept_name']}") ``` -Get your API key from the [OMOPHub Dashboard](https://dashboard.omophub.com/api-keys). +### Bulk Search -### Additional Options +Search for multiple terms in a single API call — much faster than individual requests: ```python -client = omophub.OMOPHub( - api_key="oh_xxx", - base_url="https://api.omophub.com/v1", # API base URL - timeout=30.0, # Request timeout in seconds - max_retries=3, # Retry attempts for failed requests - vocab_version="2025.2", # Specific vocabulary version -) +# Bulk lexical search (up to 50 queries) +results = client.search.bulk_basic([ + {"search_id": "q1", "query": "diabetes mellitus"}, + {"search_id": "q2", "query": "hypertension"}, + {"search_id": "q3", "query": "aspirin"}, +], defaults={"vocabulary_ids": ["SNOMED"], "page_size": 5}) + +for item in results["results"]: + print(f"{item['search_id']}: {len(item['results'])} results") + +# Bulk semantic search (up to 25 queries) +results = client.search.bulk_semantic([ + {"search_id": "s1", "query": "heart failure treatment options"}, + {"search_id": "s2", "query": "type 2 diabetes medication"}, +], defaults={"threshold": 0.5, "page_size": 10}) ``` -## Resources +### Similarity Search -### Concepts +Find concepts similar to a known concept or natural language query: ```python -# Get concept by ID -concept = client.concepts.get(201826) - -# Get concept by vocabulary code -concept = 
client.concepts.get_by_code("SNOMED", "73211009") - -# Batch get concepts -result = client.concepts.batch([201826, 4329847, 73211009]) - -# Get autocomplete suggestions -suggestions = client.concepts.suggest("diab", vocabulary="SNOMED", limit=10) - -# Get related concepts -related = client.concepts.related(201826, relatedness_types=["hierarchical", "semantic"]) - -# Get concept relationships -relationships = client.concepts.relationships(201826) +# Find concepts similar to a known concept +results = client.search.similar(concept_id=201826, algorithm="hybrid") +for r in results["results"]: + print(f"{r['concept_name']} (score: {r['similarity_score']:.2f})") + +# Find similar concepts using a natural language query +results = client.search.similar( + query="medications for high blood pressure", + algorithm="semantic", + similarity_threshold=0.6, + vocabulary_ids=["RxNorm"], + include_scores=True, +) ``` -### Search +## Async Support ```python -# Basic search -results = client.search.basic( - "heart attack", - vocabulary_ids=["SNOMED"], - domain_ids=["Condition"], - page=1, - page_size=20, -) +import asyncio +from omophub import AsyncOMOPHub -# Advanced search with facets -results = client.search.advanced( - "myocardial infarction", - vocabularies=["SNOMED", "ICD10CM"], - standard_concepts_only=True, -) +async def main(): + async with AsyncOMOPHub() as client: + concept = await client.concepts.get(201826) + print(concept["concept_name"]) -# Semantic search -results = client.search.semantic("chest pain with shortness of breath") +asyncio.run(main()) +``` -# Fuzzy search (typo-tolerant) -results = client.search.fuzzy("diabetis") # finds "diabetes" +## Use Cases -# Auto-pagination iterator -for concept in client.search.basic_iter("diabetes", page_size=100): - print(concept["concept_name"]) -``` +### ETL & Data Pipelines -### Hierarchy +Validate and map clinical codes during OMOP CDM transformations: ```python -# Get ancestors -ancestors = client.hierarchy.ancestors( - 
201826, - max_levels=5, - relationship_types=["Is a"], -) - -# Get descendants -descendants = client.hierarchy.descendants( - 201826, - max_levels=3, - standard_only=True, -) +# Validate that a source code exists and find its standard equivalent +def validate_and_map(source_vocab, source_code): + concept = client.concepts.get_by_code(source_vocab, source_code) + if concept["standard_concept"] != "S": + mappings = client.mappings.get(concept["concept_id"], + target_vocabulary="SNOMED") + return mappings["mappings"][0]["target_concept_id"] + return concept["concept_id"] ``` -### Mappings +### Data Quality Checks -```python -# Get mappings for a concept -mappings = client.mappings.get( - 201826, - target_vocabularies=["ICD10CM", "Read"], - include_mapping_quality=True, -) +Verify codes exist and are valid standard concepts: -# Map concepts to target vocabulary -result = client.mappings.map( - source_concepts=[201826, 4329847], - target_vocabulary="ICD10CM", -) +```python +# Check if all your condition codes are valid +condition_codes = ["E11.9", "I10", "J44.9"] # ICD-10 codes +for code in condition_codes: + try: + concept = client.concepts.get_by_code("ICD10CM", code) + print(f"OK {code}: {concept['concept_name']}") + except omophub.NotFoundError: + print(f"ERROR {code}: Invalid code!") ``` -### Vocabularies +### Phenotype Development + +Explore hierarchies to build comprehensive concept sets: ```python -# List all vocabularies -vocabularies = client.vocabularies.list(include_stats=True) +# Get all descendants of "Type 2 diabetes mellitus" for phenotype +descendants = client.hierarchy.descendants(201826, max_levels=5) +concept_set = [d["concept_id"] for d in descendants["concepts"]] +print(f"Found {len(concept_set)} concepts for T2DM phenotype") +``` + +### Clinical Applications -# Get vocabulary details -snomed = client.vocabularies.get("SNOMED", include_domains=True) +Build terminology lookups into healthcare applications: -# Get vocabulary statistics -stats = 
client.vocabularies.stats("SNOMED") +```python +# Autocomplete for clinical coding interface +suggestions = client.concepts.suggest("diab", vocabulary_ids=["SNOMED"], page_size=10) +# Returns: ["Diabetes mellitus", "Diabetic nephropathy", "Diabetic retinopathy", ...] ``` -### Domains +## API Resources -```python -# List all domains -domains = client.domains.list(include_statistics=True) +| Resource | Description | Key Methods | +|----------|-------------|-------------| +| `concepts` | Concept lookup and batch operations | `get()`, `get_by_code()`, `batch()`, `suggest()` | +| `search` | Full-text and semantic search | `basic()`, `advanced()`, `semantic()`, `similar()`, `bulk_basic()`, `bulk_semantic()` | +| `hierarchy` | Navigate concept relationships | `ancestors()`, `descendants()` | +| `mappings` | Cross-vocabulary mappings | `get()`, `map()` | +| `vocabularies` | Vocabulary metadata | `list()`, `get()`, `stats()` | +| `domains` | Domain information | `list()`, `get()`, `concepts()` | +| `fhir` | FHIR-to-OMOP resolution | `resolve()`, `resolve_batch()`, `resolve_codeable_concept()` | -# Get domain details -condition = client.domains.get("Condition") +## Configuration -# Get concepts in a domain -concepts = client.domains.concepts("Drug", standard_only=True) +```python +client = OMOPHub( + api_key="oh_xxx", # Or set OMOPHUB_API_KEY env var + base_url="https://api.omophub.com/v1", # API endpoint + timeout=30.0, # Request timeout (seconds) + max_retries=3, # Retry attempts + vocab_version="2025.2", # Specific vocabulary version +) ``` ## Error Handling @@ -205,49 +260,99 @@ concepts = client.domains.concepts("Drug", standard_only=True) import omophub try: - client = omophub.OMOPHub(api_key="oh_xxx") concept = client.concepts.get(999999999) except omophub.NotFoundError as e: print(f"Concept not found: {e.message}") except omophub.AuthenticationError as e: - print(f"Authentication failed: {e.message}") + print(f"Check your API key: {e.message}") except 
omophub.RateLimitError as e: print(f"Rate limited. Retry after {e.retry_after} seconds") -except omophub.ValidationError as e: - print(f"Invalid request: {e.message}") except omophub.APIError as e: print(f"API error {e.status_code}: {e.message}") -except omophub.OMOPHubError as e: - print(f"SDK error: {e.message}") ``` -## Type Hints +## Type Safety -The SDK is fully typed with TypedDict definitions for all API responses: +The SDK is fully typed with TypedDict definitions for IDE autocomplete: ```python from omophub import OMOPHub, Concept -client = OMOPHub(api_key="oh_xxx") +client = OMOPHub() concept: Concept = client.concepts.get(201826) # IDE autocomplete works for all fields -print(concept["concept_id"]) -print(concept["concept_name"]) -print(concept["vocabulary_id"]) +concept["concept_id"] # int +concept["concept_name"] # str +concept["vocabulary_id"] # str +concept["domain_id"] # str +concept["concept_class_id"] # str +``` + +## Integration Examples + +### With Pandas + +```python +import pandas as pd + +# Search and load into DataFrame +results = client.search.basic("hypertension", page_size=100) +df = pd.DataFrame(results["concepts"]) +print(df[["concept_id", "concept_name", "vocabulary_id"]].head()) ``` +### In Jupyter Notebooks + +```python +# Iterate through all results with auto-pagination +for concept in client.search.basic_iter("diabetes", page_size=100): + process_concept(concept) +``` + +## Compared to Alternatives + +| Feature | OMOPHub SDK | ATHENA Download | OHDSI WebAPI | +|---------|-------------|-----------------|--------------| +| Setup time | 1 minute | Hours | Hours | +| Infrastructure | None | Database required | Full OHDSI stack | +| Updates | Automatic | Manual download | Manual | +| Programmatic access | Native Python | SQL queries | REST API | + +**Best for:** Teams who need quick, programmatic access to OMOP vocabularies without infrastructure overhead. 
+ ## Documentation - [Full Documentation](https://docs.omophub.com/sdks/python/overview) - [API Reference](https://docs.omophub.com/api-reference) - [Examples](https://github.com/omopHub/omophub-python/tree/main/examples) +- [Get API Key](https://dashboard.omophub.com/api-keys) -## License +## Contributing -MIT License - see [LICENSE](LICENSE) for details. +We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details. + +```bash +# Clone and install for development +git clone https://github.com/omopHub/omophub-python.git +cd omophub-python +pip install -e ".[dev]" + +# Run tests +pytest +``` ## Support - [GitHub Issues](https://github.com/omopHub/omophub-python/issues) -- [Documentation](https://docs.omophub.com) +- [GitHub Discussions](https://github.com/omopHub/omophub-python/discussions) +- Email: support@omophub.com +- Website: [omophub.com](https://omophub.com) + +## License + +MIT License - see [LICENSE](LICENSE) for details. + +--- + +*Built for the OHDSI community* diff --git a/examples/map_between_vocabularies.py b/examples/map_between_vocabularies.py index f6d8b21..bcd0e73 100644 --- a/examples/map_between_vocabularies.py +++ b/examples/map_between_vocabularies.py @@ -16,8 +16,7 @@ def get_mappings() -> None: result = client.mappings.get( concept_id, - target_vocabularies=["ICD10CM", "Read", "ICD9CM"], - include_mapping_quality=True, + target_vocabulary="ICD10CM", ) source = result.get("source_concept", {}) @@ -32,12 +31,8 @@ def get_mappings() -> None: target_vocab = m.get("target_vocabulary_id", "?") target_code = m.get("target_concept_code", "?") target_name = m.get("target_concept_name", "?") - # Access confidence via quality when available - quality = m.get("quality", {}) - confidence = quality.get("confidence_score", "N/A") if quality else "N/A" print(f"\n [{target_vocab}] {target_code}") print(f" Name: {target_name}") - print(f" Confidence: {confidence}") except omophub.OMOPHubError as e: print(f"API error: 
{e.message}") finally: @@ -89,14 +84,11 @@ def lookup_by_code() -> None: print(f" Vocabulary: {concept.get('vocabulary_id', 'Unknown')}") print(f" Standard: {concept.get('standard_concept', 'N/A')}") - # If it's not a standard concept, find mappings to standard concepts + # If it's not a standard concept, find mappings if concept.get("standard_concept") != "S": - mappings = client.mappings.get( - concept.get("concept_id", 0), - standard_only=True, - ) + mappings = client.mappings.get(concept.get("concept_id", 0)) - print("\n Standard mappings:") + print("\n Mappings to other vocabularies:") for m in mappings.get("mappings", [])[:5]: print(f" → {m.get('target_concept_name', 'Unknown')}") except omophub.OMOPHubError as e: diff --git a/examples/navigate_hierarchy.py b/examples/navigate_hierarchy.py index fcdffa6..e239047 100644 --- a/examples/navigate_hierarchy.py +++ b/examples/navigate_hierarchy.py @@ -40,7 +40,7 @@ def get_descendants() -> None: result = client.hierarchy.descendants( concept_id, max_levels=2, - standard_only=True, + include_invalid=False, ) concept = result.get("concept", {}) @@ -59,7 +59,7 @@ def explore_relationships() -> None: # Aspirin concept_id = 1112807 - result = client.concepts.relationships(concept_id, page_size=20) + result = client.concepts.relationships(concept_id) relationships = result.get("relationships", result) summary = result.get("relationship_summary", {}) diff --git a/examples/search_concepts.py b/examples/search_concepts.py index 30a87a5..9c091d7 100644 --- a/examples/search_concepts.py +++ b/examples/search_concepts.py @@ -1,5 +1,9 @@ #!/usr/bin/env python3 -"""Examples of searching for concepts using the OMOPHub SDK.""" +"""Examples of searching for concepts using the OMOPHub SDK. + +Demonstrates: basic search, filtered search, autocomplete, pagination, +semantic search, similarity search, bulk lexical search, and bulk semantic search. 
+""" import omophub @@ -38,17 +42,61 @@ def filtered_search() -> None: print(f" [{c['vocabulary_id']}] {c['concept_name']}") -def fuzzy_search() -> None: - """Demonstrate typo-tolerant fuzzy search.""" - print("\n=== Fuzzy Search ===") +def bulk_lexical_search() -> None: + """Demonstrate bulk lexical search — multiple queries in one call.""" + print("\n=== Bulk Lexical Search ===") - # Fuzzy search handles typos - results = client.search.fuzzy("diabetis mellitus") # Typo in 'diabetes' - concepts = results.get("concepts", results) - print("Fuzzy search for 'diabetis mellitus' (typo):") + # Search for multiple terms at once (up to 50) + results = client.search.bulk_basic( + [ + {"search_id": "q1", "query": "diabetes mellitus"}, + {"search_id": "q2", "query": "hypertension"}, + {"search_id": "q3", "query": "aspirin"}, + ], + defaults={"vocabulary_ids": ["SNOMED"], "page_size": 5}, + ) - for c in concepts[:3]: - print(f" {c['concept_name']}") + for item in results["results"]: + print(f" {item['search_id']}: {len(item['results'])} results ({item['status']})") + + # Per-query overrides — different domains per query + results = client.search.bulk_basic( + [ + {"search_id": "conditions", "query": "diabetes", "domain_ids": ["Condition"]}, + {"search_id": "drugs", "query": "metformin", "domain_ids": ["Drug"]}, + ], + defaults={"vocabulary_ids": ["SNOMED", "RxNorm"], "page_size": 3}, + ) + + print("\n Per-query domain overrides:") + for item in results["results"]: + print(f" {item['search_id']}:") + for c in item["results"]: + print(f" {c['concept_name']} ({c['vocabulary_id']}/{c['domain_id']})") + + +def bulk_semantic_search() -> None: + """Demonstrate bulk semantic search — multiple NLP queries in one call.""" + print("\n=== Bulk Semantic Search ===") + + # Search for multiple natural-language queries (up to 25) + results = client.search.bulk_semantic( + [ + {"search_id": "s1", "query": "heart failure treatment options"}, + {"search_id": "s2", "query": "type 2 diabetes 
medication"}, + {"search_id": "s3", "query": "elevated blood pressure"}, + ], + defaults={"threshold": 0.5, "page_size": 5}, + ) + + for item in results["results"]: + count = item.get("result_count", len(item["results"])) + print(f" {item['search_id']}: {count} results ({item['status']})") + + # Show top result per query + if item["results"]: + top = item["results"][0] + print(f" Top: {top['concept_name']} (score: {top['similarity_score']:.2f})") def autocomplete_example() -> None: @@ -56,7 +104,7 @@ def autocomplete_example() -> None: print("\n=== Autocomplete ===") # Get suggestions as user types - suggestions = client.concepts.suggest("hypert", limit=5) + suggestions = client.concepts.suggest("hypert", page_size=5) print("Suggestions for 'hypert':") for s in suggestions[:5]: @@ -80,9 +128,69 @@ def pagination_example() -> None: print(f" ... and {count - 3} more concepts shown (demo limited to {count})") +def semantic_search() -> None: + """Demonstrate semantic search using neural embeddings.""" + print("\n=== Semantic Search ===") + + # Natural language search - understands clinical intent + results = client.search.semantic("high blood sugar levels") + for r in results["results"][:3]: + print(f" {r['concept_name']} (similarity: {r['similarity_score']:.2f})") + + # Filtered semantic search with minimum threshold + results = client.search.semantic( + "heart attack", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + threshold=0.5, + ) + print(f" Found {len(results['results'])} SNOMED conditions for 'heart attack'") + + +def semantic_pagination() -> None: + """Demonstrate auto-pagination with semantic_iter.""" + print("\n=== Semantic Pagination ===") + + count = 0 + for result in client.search.semantic_iter("chronic kidney disease", page_size=20): + count += 1 + if count <= 3: + print(f" {result['concept_id']}: {result['concept_name']}") + if count >= 50: # Limit for demo + break + + if count > 3: + print(f" ... 
and {count - 3} more results (demo limited to {count})") + + +def similarity_search() -> None: + """Demonstrate similarity search.""" + print("\n=== Similarity Search ===") + + # Find concepts similar to Type 2 diabetes mellitus (concept_id=201826) + results = client.search.similar(concept_id=201826, algorithm="hybrid") + print("Concepts similar to 'Type 2 diabetes mellitus':") + for r in results["results"][:5]: + print(f" {r['concept_name']} (score: {r['similarity_score']:.2f})") + + # Find similar using a natural language query with semantic algorithm + results = client.search.similar( + query="medications for high blood pressure", + algorithm="semantic", + similarity_threshold=0.6, + vocabulary_ids=["RxNorm"], + include_scores=True, + ) + print(f"\n Found {len(results['results'])} similar RxNorm concepts") + + if __name__ == "__main__": basic_search() filtered_search() - fuzzy_search() autocomplete_example() pagination_example() + semantic_search() + semantic_pagination() + similarity_search() + bulk_lexical_search() + bulk_semantic_search() diff --git a/pyproject.toml b/pyproject.toml index d70cdb8..5e8646c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "hatchling.build" [project] name = "omophub" dynamic = ["version"] -description = "Python SDK for OMOPHub - Medical Vocabulary API" +description = "Python SDK for OMOPHub - Medical Vocabulary API with semantic search" readme = "README.md" license = "MIT" requires-python = ">=3.10" @@ -27,6 +27,9 @@ keywords = [ "terminology", "api", "sdk", + "semantic-search", + "embeddings", + "nlp", ] classifiers = [ "Development Status :: 4 - Beta", diff --git a/src/omophub/_client.py b/src/omophub/_client.py index b9e1d0f..338e857 100644 --- a/src/omophub/_client.py +++ b/src/omophub/_client.py @@ -17,6 +17,7 @@ from ._request import AsyncRequest, Request from .resources.concepts import AsyncConcepts, Concepts from .resources.domains import AsyncDomains, Domains +from .resources.fhir import 
AsyncFhir, Fhir from .resources.hierarchy import AsyncHierarchy, Hierarchy from .resources.mappings import AsyncMappings, Mappings from .resources.relationships import AsyncRelationships, Relationships @@ -97,6 +98,14 @@ def __init__( self._mappings: Mappings | None = None self._vocabularies: Vocabularies | None = None self._domains: Domains | None = None + self._fhir: Fhir | None = None + + @property + def fhir(self) -> Fhir: + """Access the FHIR resolver resource.""" + if self._fhir is None: + self._fhir = Fhir(self._request) + return self._fhir @property def concepts(self) -> Concepts: @@ -228,6 +237,14 @@ def __init__( self._mappings: AsyncMappings | None = None self._vocabularies: AsyncVocabularies | None = None self._domains: AsyncDomains | None = None + self._fhir: AsyncFhir | None = None + + @property + def fhir(self) -> AsyncFhir: + """Access the FHIR resolver resource.""" + if self._fhir is None: + self._fhir = AsyncFhir(self._request) + return self._fhir @property def concepts(self) -> AsyncConcepts: diff --git a/src/omophub/_http.py b/src/omophub/_http.py index abab848..31384c2 100644 --- a/src/omophub/_http.py +++ b/src/omophub/_http.py @@ -2,6 +2,7 @@ from __future__ import annotations +import random import time from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any @@ -12,6 +13,12 @@ from ._exceptions import ConnectionError, TimeoutError from ._version import get_version +# Retry constants (OpenAI-style exponential backoff with jitter) +INITIAL_RETRY_DELAY = 0.5 # seconds +MAX_RETRY_DELAY = 8.0 # seconds +MAX_RETRY_AFTER = 60 # max seconds to respect from Retry-After header +RETRYABLE_STATUS_CODES = (429, 502, 503, 504) + if TYPE_CHECKING: from collections.abc import Mapping @@ -24,6 +31,37 @@ HTTP2_AVAILABLE = False +def _calculate_retry_delay( + attempt: int, + max_retries: int, + response_headers: Mapping[str, str] | None = None, +) -> float: + """Calculate retry delay with Retry-After support and exponential backoff + jitter. 
+ + Follows the OpenAI pattern: + 1. If Retry-After header present and <= 60s, use it + 2. Otherwise, exponential backoff (0.5s * 2^attempt) with 25% jitter, capped at 8s + """ + # Check Retry-After header first + if response_headers: + retry_after = response_headers.get("retry-after") or response_headers.get( + "Retry-After" + ) + if retry_after: + try: + retry_after_seconds = float(retry_after) + if 0 < retry_after_seconds <= MAX_RETRY_AFTER: + return retry_after_seconds + except ValueError: + pass + + # Exponential backoff with jitter + retries_done = min(max_retries - (max_retries - attempt), 1000) + sleep_seconds = min(INITIAL_RETRY_DELAY * (2.0**retries_done), MAX_RETRY_DELAY) + jitter = 1 - 0.25 * random.random() + return sleep_seconds * jitter + + class HTTPClient(ABC): """Abstract base class for HTTP clients.""" @@ -103,7 +141,7 @@ def _get_default_headers(self) -> dict[str, str]: return { "Accept": "application/json", "Content-Type": "application/json", - "User-Agent": f"omophub-python/{get_version()}", + "User-Agent": f"OMOPHub-SDK-Python/{get_version()}", } def request( @@ -137,6 +175,16 @@ def request( params=filtered_params if filtered_params else None, json=json, ) + # Retry on rate limits (429) and server errors (502, 503, 504) + if ( + response.status_code in RETRYABLE_STATUS_CODES + and attempt < self._max_retries + ): + delay = _calculate_retry_delay( + attempt, self._max_retries, response.headers + ) + time.sleep(delay) + continue return response.content, response.status_code, response.headers except httpx.ConnectError as e: @@ -148,7 +196,8 @@ def request( # Exponential backoff before retry if attempt < self._max_retries: - time.sleep(2**attempt * 0.1) + delay = _calculate_retry_delay(attempt, self._max_retries) + time.sleep(delay) raise last_exception or ConnectionError("Request failed after retries") @@ -186,7 +235,7 @@ def _get_default_headers(self) -> dict[str, str]: return { "Accept": "application/json", "Content-Type": "application/json", 
- "User-Agent": f"omophub-python/{get_version()}", + "User-Agent": f"OMOPHub-SDK-Python/{get_version()}", } async def request( @@ -222,6 +271,16 @@ async def request( params=filtered_params if filtered_params else None, json=json, ) + # Retry on rate limits (429) and server errors (502, 503, 504) + if ( + response.status_code in RETRYABLE_STATUS_CODES + and attempt < self._max_retries + ): + delay = _calculate_retry_delay( + attempt, self._max_retries, response.headers + ) + await asyncio.sleep(delay) + continue return response.content, response.status_code, response.headers except httpx.ConnectError as e: @@ -233,7 +292,8 @@ async def request( # Exponential backoff before retry if attempt < self._max_retries: - await asyncio.sleep(2**attempt * 0.1) + delay = _calculate_retry_delay(attempt, self._max_retries) + await asyncio.sleep(delay) raise last_exception or ConnectionError("Request failed after retries") diff --git a/src/omophub/_pagination.py b/src/omophub/_pagination.py index ba80bbb..a81aade 100644 --- a/src/omophub/_pagination.py +++ b/src/omophub/_pagination.py @@ -6,7 +6,7 @@ from urllib.parse import urlencode if TYPE_CHECKING: - from collections.abc import AsyncIterator, Callable, Iterator + from collections.abc import AsyncIterator, Awaitable, Callable, Iterator from ._types import PaginationMeta @@ -106,7 +106,7 @@ def paginate_sync( async def paginate_async( - fetch_page: Callable[[int, int], tuple[list[T], PaginationMeta | None]], + fetch_page: Callable[[int, int], Awaitable[tuple[list[T], PaginationMeta | None]]], page_size: int = DEFAULT_PAGE_SIZE, ) -> AsyncIterator[T]: """Create an async iterator that auto-paginates through results. 
@@ -121,12 +121,7 @@ async def paginate_async( page = 1 while True: - # Note: fetch_page should be an async function - result = fetch_page(page, page_size) - if hasattr(result, "__await__"): - items, meta = await result # type: ignore - else: - items, meta = result + items, meta = await fetch_page(page, page_size) for item in items: yield item diff --git a/src/omophub/_request.py b/src/omophub/_request.py index ceaa0bd..4649bc0 100644 --- a/src/omophub/_request.py +++ b/src/omophub/_request.py @@ -17,6 +17,66 @@ T = TypeVar("T") +def _parse_and_raise( + content: bytes, + status_code: int, + headers: Mapping[str, str], +) -> dict[str, Any]: + """Parse JSON response body and raise on HTTP errors. + + Shared by both sync and async request classes to avoid duplicating + the JSON-decode, error-extraction, and rate-limit-retry logic. + + Returns: + The parsed JSON dict (caller decides whether to unwrap ``data``). + + Raises: + OMOPHubError: On invalid JSON from a successful response. + APIError / RateLimitError / etc.: On HTTP error status codes. 
+ """ + request_id = headers.get("X-Request-Id") or headers.get("x-request-id") + + try: + data = json.loads(content) if content else {} + except json.JSONDecodeError as exc: + if status_code >= 400: + raise_for_status( + status_code, + f"Request failed with status {status_code}", + request_id=request_id, + ) + raise OMOPHubError( + f"Invalid JSON response: {content[:200].decode(errors='replace')}" + ) from exc + + if status_code >= 400: + error_response: ErrorResponse = data # type: ignore[assignment] + error = error_response.get("error", {}) + message = error.get("message", f"Request failed with status {status_code}") + error_code = error.get("code") + details = error.get("details") + + retry_after = None + if status_code == 429: + retry_after_header = headers.get("Retry-After") or headers.get( + "retry-after" + ) + if retry_after_header: + with contextlib.suppress(ValueError): + retry_after = int(retry_after_header) + + raise_for_status( + status_code, + message, + request_id=request_id, + error_code=error_code, + details=details, + retry_after=retry_after, + ) + + return data + + class Request(Generic[T]): """Handles API request execution and response parsing.""" @@ -50,53 +110,20 @@ def _parse_response( status_code: int, headers: Mapping[str, str], ) -> T: - """Parse API response and handle errors.""" - request_id = headers.get("X-Request-Id") or headers.get("x-request-id") - - try: - data = json.loads(content) if content else {} - except json.JSONDecodeError as exc: - if status_code >= 400: - raise_for_status( - status_code, - f"Request failed with status {status_code}", - request_id=request_id, - ) - raise OMOPHubError( - f"Invalid JSON response: {content[:200].decode(errors='replace')}" - ) from exc - - # Handle error responses - if status_code >= 400: - error_response: ErrorResponse = data # type: ignore[assignment] - error = error_response.get("error", {}) - message = error.get("message", f"Request failed with status {status_code}") - error_code = 
error.get("code") - details = error.get("details") - - # Check for rate limit retry-after - retry_after = None - if status_code == 429: - retry_after_header = headers.get("Retry-After") or headers.get( - "retry-after" - ) - if retry_after_header: - with contextlib.suppress(ValueError): - retry_after = int(retry_after_header) - - raise_for_status( - status_code, - message, - request_id=request_id, - error_code=error_code, - details=details, - retry_after=retry_after, - ) - - # Return successful response data + """Parse API response, raise on errors, return the ``data`` field.""" + data = _parse_and_raise(content, status_code, headers) response: APIResponse = data # type: ignore[assignment] return response.get("data", data) + def _parse_response_raw( + self, + content: bytes, + status_code: int, + headers: Mapping[str, str], + ) -> dict[str, Any]: + """Parse API response, raise on errors, return the full dict with ``meta``.""" + return _parse_and_raise(content, status_code, headers) + def get( self, path: str, @@ -112,6 +139,25 @@ def get( ) return self._parse_response(content, status_code, headers) + def get_raw( + self, + path: str, + params: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make a GET request and return full response with meta. + + Unlike get() which extracts just the 'data' field, + this method returns the complete response including 'meta' for pagination. 
+ """ + url = self._build_url(path) + content, status_code, headers = self._http_client.request( + "GET", + url, + headers=self._get_auth_headers(), + params=params, + ) + return self._parse_response_raw(content, status_code, headers) + def post( self, path: str, @@ -163,53 +209,20 @@ def _parse_response( status_code: int, headers: Mapping[str, str], ) -> T: - """Parse API response and handle errors.""" - request_id = headers.get("X-Request-Id") or headers.get("x-request-id") - - try: - data = json.loads(content) if content else {} - except json.JSONDecodeError as exc: - if status_code >= 400: - raise_for_status( - status_code, - f"Request failed with status {status_code}", - request_id=request_id, - ) - raise OMOPHubError( - f"Invalid JSON response: {content[:200].decode(errors='replace')}" - ) from exc - - # Handle error responses - if status_code >= 400: - error_response: ErrorResponse = data # type: ignore[assignment] - error = error_response.get("error", {}) - message = error.get("message", f"Request failed with status {status_code}") - error_code = error.get("code") - details = error.get("details") - - # Check for rate limit retry-after - retry_after = None - if status_code == 429: - retry_after_header = headers.get("Retry-After") or headers.get( - "retry-after" - ) - if retry_after_header: - with contextlib.suppress(ValueError): - retry_after = int(retry_after_header) - - raise_for_status( - status_code, - message, - request_id=request_id, - error_code=error_code, - details=details, - retry_after=retry_after, - ) - - # Return successful response data + """Parse API response, raise on errors, return the ``data`` field.""" + data = _parse_and_raise(content, status_code, headers) response: APIResponse = data # type: ignore[assignment] return response.get("data", data) + def _parse_response_raw( + self, + content: bytes, + status_code: int, + headers: Mapping[str, str], + ) -> dict[str, Any]: + """Parse API response, raise on errors, return the full dict with 
``meta``.""" + return _parse_and_raise(content, status_code, headers) + async def get( self, path: str, @@ -225,6 +238,25 @@ async def get( ) return self._parse_response(content, status_code, headers) + async def get_raw( + self, + path: str, + params: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make an async GET request and return full response with meta. + + Unlike get() which extracts just the 'data' field, + this method returns the complete response including 'meta' for pagination. + """ + url = self._build_url(path) + content, status_code, headers = await self._http_client.request( + "GET", + url, + headers=self._get_auth_headers(), + params=params, + ) + return self._parse_response_raw(content, status_code, headers) + async def post( self, path: str, diff --git a/src/omophub/_version.py b/src/omophub/_version.py index 159d658..7476876 100644 --- a/src/omophub/_version.py +++ b/src/omophub/_version.py @@ -1,6 +1,11 @@ """Version information for the OMOPHub SDK.""" -__version__ = "0.1.0" +from importlib.metadata import PackageNotFoundError, version + +try: + __version__ = version("omophub") +except PackageNotFoundError: + __version__ = "0.0.0-dev" def get_version() -> str: diff --git a/src/omophub/resources/concepts.py b/src/omophub/resources/concepts.py index d197a4e..def06dc 100644 --- a/src/omophub/resources/concepts.py +++ b/src/omophub/resources/concepts.py @@ -7,7 +7,6 @@ if TYPE_CHECKING: from .._request import AsyncRequest, Request from ..types.concept import BatchConceptResult, Concept - from ..types.search import Suggestion class GetConceptParams(TypedDict, total=False): @@ -32,21 +31,19 @@ class SuggestParams(TypedDict, total=False): """Parameters for concept suggestions.""" query: str - vocabulary: str - domain: str - limit: int + page: int + page_size: int + vocabulary_ids: list[str] + domain_ids: list[str] + vocab_release: str class RelatedParams(TypedDict, total=False): """Parameters for related concepts.""" - relatedness_types: 
list[str] - vocabulary_ids: list[str] - domain_ids: list[str] - min_relatedness_score: float - max_results: int - include_scores: bool - standard_concepts_only: bool + relationship_types: list[str] + min_score: float + page_size: int class RelationshipsParams(TypedDict, total=False): @@ -71,13 +68,17 @@ def get( *, include_relationships: bool = False, include_synonyms: bool = False, + include_hierarchy: bool = False, + vocab_release: str | None = None, ) -> Concept: """Get a concept by ID. Args: concept_id: The OMOP concept ID - include_relationships: Include related concepts + include_relationships: Include related concepts (parents/children) include_synonyms: Include concept synonyms + include_hierarchy: Include hierarchy information + vocab_release: Specific vocabulary release (e.g., "2025.2") Returns: The concept data @@ -87,6 +88,10 @@ def get( params["include_relationships"] = "true" if include_synonyms: params["include_synonyms"] = "true" + if include_hierarchy: + params["include_hierarchy"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release return self._request.get(f"/concepts/{concept_id}", params=params or None) @@ -94,17 +99,39 @@ def get_by_code( self, vocabulary_id: str, concept_code: str, + *, + include_relationships: bool = False, + include_synonyms: bool = False, + include_hierarchy: bool = False, + vocab_release: str | None = None, ) -> Concept: """Get a concept by vocabulary and code. 
Args: vocabulary_id: The vocabulary ID (e.g., "SNOMED", "ICD10CM") concept_code: The concept code within the vocabulary + include_relationships: Include related concepts (parents/children) + include_synonyms: Include concept synonyms + include_hierarchy: Include hierarchy information + vocab_release: Specific vocabulary release (e.g., "2025.2") Returns: - The concept data with mappings + The concept data with optional relationships and synonyms """ - return self._request.get(f"/concepts/by-code/{vocabulary_id}/{concept_code}") + params: dict[str, Any] = {} + if include_relationships: + params["include_relationships"] = "true" + if include_synonyms: + params["include_synonyms"] = "true" + if include_hierarchy: + params["include_hierarchy"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release + + return self._request.get( + f"/concepts/by-code/{vocabulary_id}/{concept_code}", + params=params or None, + ) def batch( self, @@ -114,17 +141,17 @@ def batch( include_synonyms: bool = False, include_mappings: bool = False, vocabulary_filter: list[str] | None = None, - standard_only: bool = False, + standard_only: bool = True, ) -> BatchConceptResult: """Get multiple concepts by IDs. 
Args: - concept_ids: List of concept IDs (max 1000) + concept_ids: List of concept IDs (max 100) include_relationships: Include related concepts include_synonyms: Include concept synonyms include_mappings: Include concept mappings vocabulary_filter: Filter results to specific vocabularies - standard_only: Only return standard concepts + standard_only: Only return standard concepts (default True) Returns: Batch result with concepts and any failures @@ -147,26 +174,32 @@ def suggest( self, query: str, *, - vocabulary: str | None = None, - domain: str | None = None, - limit: int = 10, - ) -> list[Suggestion]: + page: int = 1, + page_size: int = 10, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + vocab_release: str | None = None, + ) -> dict[str, Any]: """Get concept suggestions (autocomplete). Args: - query: Search query (min 2 characters) - vocabulary: Filter to specific vocabulary - domain: Filter to specific domain - limit: Maximum suggestions (default 10, max 50) + query: Search query (min 2 characters, max 100 characters) + page: Page number (default 1) + page_size: Number of suggestions per page (default 10, max 100) + vocabulary_ids: Filter to specific vocabularies (e.g., ["SNOMED", "ICD10CM"]) + domain_ids: Filter to specific domains (e.g., ["Condition", "Drug"]) + vocab_release: Specific vocabulary release (e.g., "2025.2") Returns: - List of suggestions + Paginated response with suggestions and pagination metadata """ - params: dict[str, Any] = {"query": query, "limit": limit} - if vocabulary: - params["vocabulary"] = vocabulary - if domain: - params["domain"] = domain + params: dict[str, Any] = {"query": query, "page": page, "page_size": page_size} + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if vocab_release: + params["vocab_release"] = vocab_release return self._request.get("/concepts/suggest", params=params) @@ -174,43 +207,30 
@@ def related( self, concept_id: int, *, - relatedness_types: list[str] | None = None, - vocabulary_ids: list[str] | None = None, - domain_ids: list[str] | None = None, - min_relatedness_score: float | None = None, - max_results: int = 50, - include_scores: bool = True, - standard_concepts_only: bool = False, + relationship_types: list[str] | None = None, + min_score: float | None = None, + page_size: int = 20, + vocab_release: str | None = None, ) -> dict[str, Any]: """Get related concepts. Args: concept_id: The source concept ID - relatedness_types: Types of relatedness (hierarchical, semantic, etc.) - vocabulary_ids: Filter to specific vocabularies - domain_ids: Filter to specific domains - min_relatedness_score: Minimum relatedness score - max_results: Maximum results (default 50, max 200) - include_scores: Include score breakdown - standard_concepts_only: Only return standard concepts + relationship_types: Filter by relationship types (e.g., ["Is a", "Maps to"]) + min_score: Minimum relationship score (0.0-1.0) + page_size: Maximum number of results (default 20, max 100) + vocab_release: Specific vocabulary release (e.g., "2025.1") Returns: - Related concepts with scores and analysis + Related concepts with relationship scores """ - params: dict[str, Any] = { - "max_results": max_results, - "include_scores": "true" if include_scores else "false", - } - if relatedness_types: - params["relatedness_types"] = ",".join(relatedness_types) - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if domain_ids: - params["domain_ids"] = ",".join(domain_ids) - if min_relatedness_score is not None: - params["min_relatedness_score"] = min_relatedness_score - if standard_concepts_only: - params["standard_concepts_only"] = "true" + params: dict[str, Any] = {"page_size": page_size} + if relationship_types: + params["relationship_types"] = ",".join(relationship_types) + if min_score is not None: + params["min_score"] = min_score + if vocab_release: + 
params["vocab_release"] = vocab_release return self._request.get(f"/concepts/{concept_id}/related", params=params) @@ -218,35 +238,98 @@ def relationships( self, concept_id: int, *, - relationship_type: str | None = None, - target_vocabulary: str | None = None, + relationship_ids: str | list[str] | None = None, + vocabulary_ids: str | list[str] | None = None, + domain_ids: str | list[str] | None = None, include_invalid: bool = False, - page: int = 1, - page_size: int = 20, + standard_only: bool = False, + include_reverse: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Get concept relationships. Args: concept_id: The concept ID - relationship_type: Filter by relationship type - target_vocabulary: Filter by target vocabulary - include_invalid: Include invalid relationships - page: Page number - page_size: Items per page + relationship_ids: Filter by relationship type IDs (string or list) + vocabulary_ids: Filter by target vocabulary IDs (string or list) + domain_ids: Filter by target domain IDs (string or list) + include_invalid: Include relationships to invalid concepts + standard_only: Only include relationships to standard concepts + include_reverse: Include reverse relationships + vocab_release: Specific vocabulary release version Returns: - Relationships with summary + Relationships data """ - params: dict[str, Any] = {"page": page, "page_size": page_size} - if relationship_type: - params["relationship_type"] = relationship_type - if target_vocabulary: - params["target_vocabulary"] = target_vocabulary + params: dict[str, Any] = {} + if relationship_ids: + params["relationship_ids"] = ( + ",".join(relationship_ids) + if isinstance(relationship_ids, list) + else relationship_ids + ) + if vocabulary_ids: + params["vocabulary_ids"] = ( + ",".join(vocabulary_ids) + if isinstance(vocabulary_ids, list) + else vocabulary_ids + ) + if domain_ids: + params["domain_ids"] = ( + ",".join(domain_ids) if isinstance(domain_ids, list) else domain_ids + 
) if include_invalid: params["include_invalid"] = "true" + if standard_only: + params["standard_only"] = "true" + if include_reverse: + params["include_reverse"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release return self._request.get(f"/concepts/{concept_id}/relationships", params=params) + def recommended( + self, + concept_ids: list[int], + *, + relationship_types: list[str] | None = None, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + standard_only: bool = True, + include_invalid: bool = False, + page: int = 1, + page_size: int = 100, + ) -> dict[str, Any]: + """Get recommended concepts using OHDSI Phoebe algorithm. + + Args: + concept_ids: List of source concept IDs (1-100) + relationship_types: Filter by relationship types (max 20) + vocabulary_ids: Filter to specific vocabularies (max 50) + domain_ids: Filter to specific domains (max 50) + standard_only: Only return standard concepts (default True) + include_invalid: Include invalid/deprecated concepts (default False) + page: Page number (default 1) + page_size: Results per page (default 100, max 1000) + + Returns: + Recommendations grouped by source concept ID with pagination metadata + """ + body: dict[str, Any] = {"concept_ids": concept_ids} + if relationship_types: + body["relationship_types"] = relationship_types + if vocabulary_ids: + body["vocabulary_ids"] = vocabulary_ids + if domain_ids: + body["domain_ids"] = domain_ids + body["standard_only"] = standard_only + body["include_invalid"] = include_invalid + body["page"] = page + body["page_size"] = page_size + + return self._request.post("/concepts/recommended", json_data=body) + class AsyncConcepts: """Asynchronous concepts resource.""" @@ -260,13 +343,30 @@ async def get( *, include_relationships: bool = False, include_synonyms: bool = False, + include_hierarchy: bool = False, + vocab_release: str | None = None, ) -> Concept: - """Get a concept by ID.""" + """Get a concept by ID. 
+ + Args: + concept_id: The OMOP concept ID + include_relationships: Include related concepts (parents/children) + include_synonyms: Include concept synonyms + include_hierarchy: Include hierarchy information + vocab_release: Specific vocabulary release (e.g., "2025.2") + + Returns: + The concept data + """ params: dict[str, Any] = {} if include_relationships: params["include_relationships"] = "true" if include_synonyms: params["include_synonyms"] = "true" + if include_hierarchy: + params["include_hierarchy"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release return await self._request.get(f"/concepts/{concept_id}", params=params or None) @@ -274,10 +374,38 @@ async def get_by_code( self, vocabulary_id: str, concept_code: str, + *, + include_relationships: bool = False, + include_synonyms: bool = False, + include_hierarchy: bool = False, + vocab_release: str | None = None, ) -> Concept: - """Get a concept by vocabulary and code.""" + """Get a concept by vocabulary and code. 
+ + Args: + vocabulary_id: The vocabulary ID (e.g., "SNOMED", "ICD10CM") + concept_code: The concept code within the vocabulary + include_relationships: Include related concepts (parents/children) + include_synonyms: Include concept synonyms + include_hierarchy: Include hierarchy information + vocab_release: Specific vocabulary release (e.g., "2025.2") + + Returns: + The concept data with optional relationships and synonyms + """ + params: dict[str, Any] = {} + if include_relationships: + params["include_relationships"] = "true" + if include_synonyms: + params["include_synonyms"] = "true" + if include_hierarchy: + params["include_hierarchy"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release + return await self._request.get( - f"/concepts/by-code/{vocabulary_id}/{concept_code}" + f"/concepts/by-code/{vocabulary_id}/{concept_code}", + params=params or None, ) async def batch( @@ -288,9 +416,9 @@ async def batch( include_synonyms: bool = False, include_mappings: bool = False, vocabulary_filter: list[str] | None = None, - standard_only: bool = False, + standard_only: bool = True, ) -> BatchConceptResult: - """Get multiple concepts by IDs.""" + """Get multiple concepts by IDs (max 100).""" body: dict[str, Any] = {"concept_ids": concept_ids} if include_relationships: body["include_relationships"] = True @@ -309,16 +437,32 @@ async def suggest( self, query: str, *, - vocabulary: str | None = None, - domain: str | None = None, - limit: int = 10, - ) -> list[Suggestion]: - """Get concept suggestions (autocomplete).""" - params: dict[str, Any] = {"query": query, "limit": limit} - if vocabulary: - params["vocabulary"] = vocabulary - if domain: - params["domain"] = domain + page: int = 1, + page_size: int = 10, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + vocab_release: str | None = None, + ) -> dict[str, Any]: + """Get concept suggestions (autocomplete). 
+ + Args: + query: Search query (min 2 characters, max 100 characters) + page: Page number (default 1) + page_size: Number of suggestions per page (default 10, max 100) + vocabulary_ids: Filter to specific vocabularies (e.g., ["SNOMED", "ICD10CM"]) + domain_ids: Filter to specific domains (e.g., ["Condition", "Drug"]) + vocab_release: Specific vocabulary release (e.g., "2025.2") + + Returns: + Paginated response with suggestions and pagination metadata + """ + params: dict[str, Any] = {"query": query, "page": page, "page_size": page_size} + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if vocab_release: + params["vocab_release"] = vocab_release return await self._request.get("/concepts/suggest", params=params) @@ -326,29 +470,30 @@ async def related( self, concept_id: int, *, - relatedness_types: list[str] | None = None, - vocabulary_ids: list[str] | None = None, - domain_ids: list[str] | None = None, - min_relatedness_score: float | None = None, - max_results: int = 50, - include_scores: bool = True, - standard_concepts_only: bool = False, + relationship_types: list[str] | None = None, + min_score: float | None = None, + page_size: int = 20, + vocab_release: str | None = None, ) -> dict[str, Any]: - """Get related concepts.""" - params: dict[str, Any] = { - "max_results": max_results, - "include_scores": "true" if include_scores else "false", - } - if relatedness_types: - params["relatedness_types"] = ",".join(relatedness_types) - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if domain_ids: - params["domain_ids"] = ",".join(domain_ids) - if min_relatedness_score is not None: - params["min_relatedness_score"] = min_relatedness_score - if standard_concepts_only: - params["standard_concepts_only"] = "true" + """Get related concepts. 
+ + Args: + concept_id: The source concept ID + relationship_types: Filter by relationship types (e.g., ["Is a", "Maps to"]) + min_score: Minimum relationship score (0.0-1.0) + page_size: Maximum number of results (default 20, max 100) + vocab_release: Specific vocabulary release (e.g., "2025.1") + + Returns: + Related concepts with relationship scores + """ + params: dict[str, Any] = {"page_size": page_size} + if relationship_types: + params["relationship_types"] = ",".join(relationship_types) + if min_score is not None: + params["min_score"] = min_score + if vocab_release: + params["vocab_release"] = vocab_release return await self._request.get(f"/concepts/{concept_id}/related", params=params) @@ -356,21 +501,82 @@ async def relationships( self, concept_id: int, *, - relationship_type: str | None = None, - target_vocabulary: str | None = None, + relationship_ids: str | list[str] | None = None, + vocabulary_ids: str | list[str] | None = None, + domain_ids: str | list[str] | None = None, include_invalid: bool = False, - page: int = 1, - page_size: int = 20, + standard_only: bool = False, + include_reverse: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Get concept relationships.""" - params: dict[str, Any] = {"page": page, "page_size": page_size} - if relationship_type: - params["relationship_type"] = relationship_type - if target_vocabulary: - params["target_vocabulary"] = target_vocabulary + params: dict[str, Any] = {} + if relationship_ids: + params["relationship_ids"] = ( + ",".join(relationship_ids) + if isinstance(relationship_ids, list) + else relationship_ids + ) + if vocabulary_ids: + params["vocabulary_ids"] = ( + ",".join(vocabulary_ids) + if isinstance(vocabulary_ids, list) + else vocabulary_ids + ) + if domain_ids: + params["domain_ids"] = ( + ",".join(domain_ids) if isinstance(domain_ids, list) else domain_ids + ) if include_invalid: params["include_invalid"] = "true" + if standard_only: + params["standard_only"] = "true" + 
if include_reverse: + params["include_reverse"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release return await self._request.get( f"/concepts/{concept_id}/relationships", params=params ) + + async def recommended( + self, + concept_ids: list[int], + *, + relationship_types: list[str] | None = None, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + standard_only: bool = True, + include_invalid: bool = False, + page: int = 1, + page_size: int = 100, + ) -> dict[str, Any]: + """Get recommended concepts using OHDSI Phoebe algorithm. + + Args: + concept_ids: List of source concept IDs (1-100) + relationship_types: Filter by relationship types (max 20) + vocabulary_ids: Filter to specific vocabularies (max 50) + domain_ids: Filter to specific domains (max 50) + standard_only: Only return standard concepts (default True) + include_invalid: Include invalid/deprecated concepts (default False) + page: Page number (default 1) + page_size: Results per page (default 100, max 1000) + + Returns: + Recommendations grouped by source concept ID with pagination metadata + """ + body: dict[str, Any] = {"concept_ids": concept_ids} + if relationship_types: + body["relationship_types"] = relationship_types + if vocabulary_ids: + body["vocabulary_ids"] = vocabulary_ids + if domain_ids: + body["domain_ids"] = domain_ids + body["standard_only"] = standard_only + body["include_invalid"] = include_invalid + body["page"] = page + body["page_size"] = page_size + + return await self._request.post("/concepts/recommended", json_data=body) diff --git a/src/omophub/resources/domains.py b/src/omophub/resources/domains.py index e37d826..d0ed192 100644 --- a/src/omophub/resources/domains.py +++ b/src/omophub/resources/domains.py @@ -19,46 +19,19 @@ def __init__(self, request: Request[Any]) -> None: def list( self, *, - vocabulary_ids: builtins.list[str] | None = None, - include_concept_counts: bool = True, - include_statistics: bool = False, - 
include_examples: bool = False, - standard_only: bool = False, - active_only: bool = True, - sort_by: str = "domain_id", - sort_order: str = "asc", + include_stats: bool = False, ) -> dict[str, Any]: """List all domains. Args: - vocabulary_ids: Filter by vocabularies - include_concept_counts: Include concept counts - include_statistics: Include detailed statistics - include_examples: Include example concepts - standard_only: Only standard concepts - active_only: Only active domains - sort_by: Sort field - sort_order: Sort order + include_stats: Include concept counts and vocabulary coverage Returns: - Domain list with summary + Domain list """ - params: dict[str, Any] = { - "sort_by": sort_by, - "sort_order": sort_order, - } - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if include_concept_counts: - params["include_concept_counts"] = "true" - if include_statistics: - params["include_statistics"] = "true" - if include_examples: - params["include_examples"] = "true" - if standard_only: - params["standard_only"] = "true" - if not active_only: - params["active_only"] = "false" + params: dict[str, Any] = {} + if include_stats: + params["include_stats"] = "true" return self._request.get("/domains", params=params) @@ -67,8 +40,8 @@ def concepts( domain_id: str, *, vocabulary_ids: builtins.list[str] | None = None, - concept_class_ids: builtins.list[str] | None = None, standard_only: bool = False, + include_invalid: bool = False, page: int = 1, page_size: int = 50, ) -> dict[str, Any]: @@ -77,8 +50,8 @@ def concepts( Args: domain_id: The domain ID vocabulary_ids: Filter by vocabularies - concept_class_ids: Filter by concept classes standard_only: Only standard concepts + include_invalid: Include invalid/deprecated concepts page: Page number page_size: Results per page @@ -88,10 +61,10 @@ def concepts( params: dict[str, Any] = {"page": page, "page_size": page_size} if vocabulary_ids: params["vocabulary_ids"] = ",".join(vocabulary_ids) - if 
concept_class_ids: - params["concept_class_ids"] = ",".join(concept_class_ids) if standard_only: params["standard_only"] = "true" + if include_invalid: + params["include_invalid"] = "true" return self._request.get(f"/domains/{domain_id}/concepts", params=params) @@ -105,46 +78,19 @@ def __init__(self, request: AsyncRequest[Any]) -> None: async def list( self, *, - vocabulary_ids: builtins.list[str] | None = None, - include_concept_counts: bool = True, - include_statistics: bool = False, - include_examples: bool = False, - standard_only: bool = False, - active_only: bool = True, - sort_by: str = "domain_id", - sort_order: str = "asc", + include_stats: bool = False, ) -> dict[str, Any]: """List all domains. Args: - vocabulary_ids: Filter by vocabularies - include_concept_counts: Include concept counts - include_statistics: Include detailed statistics - include_examples: Include example concepts - standard_only: Only standard concepts - active_only: Only active domains - sort_by: Sort field - sort_order: Sort order + include_stats: Include concept counts and vocabulary coverage Returns: - Domain list with summary + Domain list """ - params: dict[str, Any] = { - "sort_by": sort_by, - "sort_order": sort_order, - } - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if include_concept_counts: - params["include_concept_counts"] = "true" - if include_statistics: - params["include_statistics"] = "true" - if include_examples: - params["include_examples"] = "true" - if standard_only: - params["standard_only"] = "true" - if not active_only: - params["active_only"] = "false" + params: dict[str, Any] = {} + if include_stats: + params["include_stats"] = "true" return await self._request.get("/domains", params=params) @@ -153,8 +99,8 @@ async def concepts( domain_id: str, *, vocabulary_ids: builtins.list[str] | None = None, - concept_class_ids: builtins.list[str] | None = None, standard_only: bool = False, + include_invalid: bool = False, page: int = 1, 
page_size: int = 50, ) -> dict[str, Any]: @@ -163,8 +109,8 @@ async def concepts( Args: domain_id: The domain ID vocabulary_ids: Filter by vocabularies - concept_class_ids: Filter by concept classes standard_only: Only standard concepts + include_invalid: Include invalid/deprecated concepts page: Page number page_size: Results per page @@ -174,9 +120,9 @@ async def concepts( params: dict[str, Any] = {"page": page, "page_size": page_size} if vocabulary_ids: params["vocabulary_ids"] = ",".join(vocabulary_ids) - if concept_class_ids: - params["concept_class_ids"] = ",".join(concept_class_ids) if standard_only: params["standard_only"] = "true" + if include_invalid: + params["include_invalid"] = "true" return await self._request.get(f"/domains/{domain_id}/concepts", params=params) diff --git a/src/omophub/resources/fhir.py b/src/omophub/resources/fhir.py new file mode 100644 index 0000000..5f8560e --- /dev/null +++ b/src/omophub/resources/fhir.py @@ -0,0 +1,272 @@ +"""FHIR Resolver resource implementation.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from .._request import AsyncRequest, Request + from ..types.fhir import ( + FhirBatchResult, + FhirCodeableConceptResult, + FhirResolveResult, + ) + + +def _build_resolve_body( + *, + system: str | None = None, + code: str | None = None, + display: str | None = None, + vocabulary_id: str | None = None, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, +) -> dict[str, Any]: + body: dict[str, Any] = {} + if system is not None: + body["system"] = system + if code is not None: + body["code"] = code + if display is not None: + body["display"] = display + if vocabulary_id is not None: + body["vocabulary_id"] = vocabulary_id + if resource_type is not None: + body["resource_type"] = resource_type + if include_recommendations: + body["include_recommendations"] = True + 
body["recommendations_limit"] = recommendations_limit + if include_quality: + body["include_quality"] = True + return body + + +class Fhir: + """Synchronous FHIR resolver resource. + + Provides access to the FHIR-to-OMOP Concept Resolver endpoints that + translate FHIR coded values into OMOP standard concepts, CDM target + tables, and optional Phoebe recommendations. + + Example: + >>> result = client.fhir.resolve( + ... system="http://snomed.info/sct", + ... code="44054006", + ... resource_type="Condition", + ... ) + >>> print(result["resolution"]["target_table"]) + "condition_occurrence" + """ + + def __init__(self, request: Request[Any]) -> None: + self._request = request + + def resolve( + self, + *, + system: str | None = None, + code: str | None = None, + display: str | None = None, + vocabulary_id: str | None = None, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirResolveResult: + """Resolve a single FHIR Coding to an OMOP standard concept. + + Provide at least one of (``system`` + ``code``), + (``vocabulary_id`` + ``code``), or ``display``. + + Args: + system: FHIR code system URI (e.g. ``http://snomed.info/sct``) + code: Code value from the FHIR Coding + display: Human-readable text (semantic search fallback) + vocabulary_id: Direct OMOP vocabulary_id, bypasses URI resolution + resource_type: FHIR resource type for domain alignment check + include_recommendations: Include Phoebe recommendations + recommendations_limit: Max recommendations to return (1-20) + include_quality: Include mapping quality signal + + Returns: + Resolution result with source concept, standard concept, + target CDM table, and optional enrichments. 
+ """ + body = _build_resolve_body( + system=system, + code=code, + display=display, + vocabulary_id=vocabulary_id, + resource_type=resource_type, + include_recommendations=include_recommendations, + recommendations_limit=recommendations_limit, + include_quality=include_quality, + ) + return self._request.post("/fhir/resolve", json_data=body) + + def resolve_batch( + self, + codings: list[dict[str, str | None]], + *, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirBatchResult: + """Batch-resolve up to 100 FHIR Codings. + + Failed items are reported inline without failing the batch. + + Args: + codings: List of coding dicts, each with optional keys + ``system``, ``code``, ``display``, ``vocabulary_id``. + resource_type: FHIR resource type applied to all codings + include_recommendations: Include Phoebe recommendations + recommendations_limit: Max recommendations per item (1-20) + include_quality: Include mapping quality signal + + Returns: + Batch result with per-item results and a summary. + """ + body: dict[str, Any] = {"codings": codings} + if resource_type is not None: + body["resource_type"] = resource_type + if include_recommendations: + body["include_recommendations"] = True + body["recommendations_limit"] = recommendations_limit + if include_quality: + body["include_quality"] = True + return self._request.post("/fhir/resolve/batch", json_data=body) + + def resolve_codeable_concept( + self, + coding: list[dict[str, str]], + *, + text: str | None = None, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirCodeableConceptResult: + """Resolve a FHIR CodeableConcept with vocabulary preference. + + Picks the best match across multiple codings using the OHDSI + vocabulary preference order (SNOMED > RxNorm > LOINC > CVX > + ICD-10). 
Falls back to ``text`` via semantic search if no + coding resolves. + + Args: + coding: List of structured codings, each with ``system``, + ``code``, and optional ``display``. + text: CodeableConcept.text for semantic search fallback + resource_type: FHIR resource type for domain alignment + include_recommendations: Include Phoebe recommendations + recommendations_limit: Max recommendations (1-20) + include_quality: Include mapping quality signal + + Returns: + Result with ``best_match``, ``alternatives``, and + ``unresolved`` lists. + """ + body: dict[str, Any] = {"coding": coding} + if text is not None: + body["text"] = text + if resource_type is not None: + body["resource_type"] = resource_type + if include_recommendations: + body["include_recommendations"] = True + body["recommendations_limit"] = recommendations_limit + if include_quality: + body["include_quality"] = True + return self._request.post("/fhir/resolve/codeable-concept", json_data=body) + + +class AsyncFhir: + """Asynchronous FHIR resolver resource. + + Async counterpart of :class:`Fhir`. All methods are coroutines. + """ + + def __init__(self, request: AsyncRequest[Any]) -> None: + self._request = request + + async def resolve( + self, + *, + system: str | None = None, + code: str | None = None, + display: str | None = None, + vocabulary_id: str | None = None, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirResolveResult: + """Resolve a single FHIR Coding to an OMOP standard concept. + + See :meth:`Fhir.resolve` for full documentation. 
+ """ + body = _build_resolve_body( + system=system, + code=code, + display=display, + vocabulary_id=vocabulary_id, + resource_type=resource_type, + include_recommendations=include_recommendations, + recommendations_limit=recommendations_limit, + include_quality=include_quality, + ) + return await self._request.post("/fhir/resolve", json_data=body) + + async def resolve_batch( + self, + codings: list[dict[str, str | None]], + *, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirBatchResult: + """Batch-resolve up to 100 FHIR Codings. + + See :meth:`Fhir.resolve_batch` for full documentation. + """ + body: dict[str, Any] = {"codings": codings} + if resource_type is not None: + body["resource_type"] = resource_type + if include_recommendations: + body["include_recommendations"] = True + body["recommendations_limit"] = recommendations_limit + if include_quality: + body["include_quality"] = True + return await self._request.post("/fhir/resolve/batch", json_data=body) + + async def resolve_codeable_concept( + self, + coding: list[dict[str, str]], + *, + text: str | None = None, + resource_type: str | None = None, + include_recommendations: bool = False, + recommendations_limit: int = 5, + include_quality: bool = False, + ) -> FhirCodeableConceptResult: + """Resolve a FHIR CodeableConcept with vocabulary preference. + + See :meth:`Fhir.resolve_codeable_concept` for full documentation. 
+ """ + body: dict[str, Any] = {"coding": coding} + if text is not None: + body["text"] = text + if resource_type is not None: + body["resource_type"] = resource_type + if include_recommendations: + body["include_recommendations"] = True + body["recommendations_limit"] = recommendations_limit + if include_quality: + body["include_quality"] = True + return await self._request.post( + "/fhir/resolve/codeable-concept", json_data=body + ) diff --git a/src/omophub/resources/hierarchy.py b/src/omophub/resources/hierarchy.py index 3d2eaf3..5b7e793 100644 --- a/src/omophub/resources/hierarchy.py +++ b/src/omophub/resources/hierarchy.py @@ -14,17 +14,61 @@ class Hierarchy: def __init__(self, request: Request[Any]) -> None: self._request = request + def get( + self, + concept_id: int, + *, + format: str = "flat", + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + max_levels: int = 10, + max_results: int | None = None, + relationship_types: list[str] | None = None, + include_invalid: bool = False, + ) -> dict[str, Any]: + """Get complete concept hierarchy (ancestors and descendants). 
+ + Args: + concept_id: The concept ID + format: Response format - "flat" (default) or "graph" for visualization + vocabulary_ids: Filter to specific vocabularies (e.g., ["SNOMED", "ICD10CM"]) + domain_ids: Filter to specific domains (e.g., ["Condition", "Drug"]) + max_levels: Maximum hierarchy levels to traverse in both directions (default 10) + max_results: Maximum results per direction for performance optimization + relationship_types: Relationship types to follow (default: "Is a") + include_invalid: Include deprecated/invalid concepts (default: False) + + Returns: + For flat format: ancestors, descendants arrays with level/total counts + For graph format: nodes and edges arrays for visualization + """ + params: dict[str, Any] = { + "format": format, + "max_levels": min(max_levels, 20), + } + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if max_results is not None: + params["max_results"] = max_results + if relationship_types: + params["relationship_types"] = ",".join(relationship_types) + if include_invalid: + params["include_invalid"] = "true" + + return self._request.get(f"/concepts/{concept_id}/hierarchy", params=params) + def ancestors( self, concept_id: int, *, - vocabulary_id: str | None = None, + vocabulary_ids: list[str] | None = None, max_levels: int | None = None, relationship_types: list[str] | None = None, include_paths: bool = False, include_distance: bool = True, - standard_only: bool = False, - include_deprecated: bool = False, + include_invalid: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: @@ -32,22 +76,21 @@ def ancestors( Args: concept_id: The concept ID - vocabulary_id: Filter to specific vocabulary + vocabulary_ids: Filter to specific vocabularies (e.g., ["SNOMED", "ICD10CM"]) max_levels: Maximum hierarchy levels to traverse relationship_types: Relationship types to follow (default: "Is a") - include_paths: Include path 
information - include_distance: Include distance from source - standard_only: Only return standard concepts - include_deprecated: Include deprecated concepts + include_paths: Include path_length field for each ancestor + include_distance: Include hierarchy_level field for each ancestor + include_invalid: Include deprecated/invalid concepts (default: False) page: Page number page_size: Results per page Returns: - Ancestors with hierarchy summary + Ancestors with hierarchy_summary and pagination metadata """ params: dict[str, Any] = {"page": page, "page_size": page_size} - if vocabulary_id: - params["vocabulary_id"] = vocabulary_id + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) if max_levels is not None: params["max_levels"] = max_levels if relationship_types: @@ -56,10 +99,8 @@ def ancestors( params["include_paths"] = "true" if include_distance: params["include_distance"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_deprecated: - params["include_deprecated"] = "true" + if include_invalid: + params["include_invalid"] = "true" return self._request.get(f"/concepts/{concept_id}/ancestors", params=params) @@ -67,15 +108,13 @@ def descendants( self, concept_id: int, *, - vocabulary_id: str | None = None, + vocabulary_ids: list[str] | None = None, max_levels: int = 10, relationship_types: list[str] | None = None, include_distance: bool = True, - standard_only: bool = False, - include_deprecated: bool = False, + include_paths: bool = False, + include_invalid: bool = False, domain_ids: list[str] | None = None, - concept_class_ids: list[str] | None = None, - include_synonyms: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: @@ -83,42 +122,36 @@ def descendants( Args: concept_id: The concept ID - vocabulary_id: Filter to specific vocabulary - max_levels: Maximum hierarchy levels (default 10, max 10) - relationship_types: Relationship types to follow - include_distance: Include distance from 
source - standard_only: Only return standard concepts - include_deprecated: Include deprecated concepts - domain_ids: Filter by domains - concept_class_ids: Filter by concept classes - include_synonyms: Include synonyms + vocabulary_ids: Filter to specific vocabularies (e.g., ["SNOMED", "ICD10CM"]) + max_levels: Maximum hierarchy levels (default 10, max 20) + relationship_types: Relationship types to follow (default: "Is a") + include_distance: Include hierarchy_level field for each descendant + include_paths: Include path_length field for each descendant + include_invalid: Include deprecated/invalid concepts (default: False) + domain_ids: Filter by domains (e.g., ["Condition", "Drug"]) page: Page number page_size: Results per page Returns: - Descendants with hierarchy summary + Descendants with hierarchy_summary and pagination metadata """ params: dict[str, Any] = { - "max_levels": min(max_levels, 10), + "max_levels": min(max_levels, 20), "page": page, "page_size": page_size, } - if vocabulary_id: - params["vocabulary_id"] = vocabulary_id + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) if relationship_types: params["relationship_types"] = ",".join(relationship_types) if include_distance: params["include_distance"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_deprecated: - params["include_deprecated"] = "true" + if include_paths: + params["include_paths"] = "true" + if include_invalid: + params["include_invalid"] = "true" if domain_ids: params["domain_ids"] = ",".join(domain_ids) - if concept_class_ids: - params["concept_class_ids"] = ",".join(concept_class_ids) - if include_synonyms: - params["include_synonyms"] = "true" return self._request.get(f"/concepts/{concept_id}/descendants", params=params) @@ -129,24 +162,55 @@ class AsyncHierarchy: def __init__(self, request: AsyncRequest[Any]) -> None: self._request = request + async def get( + self, + concept_id: int, + *, + format: str = "flat", + 
vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + max_levels: int = 10, + max_results: int | None = None, + relationship_types: list[str] | None = None, + include_invalid: bool = False, + ) -> dict[str, Any]: + """Get complete concept hierarchy (ancestors and descendants).""" + params: dict[str, Any] = { + "format": format, + "max_levels": min(max_levels, 20), + } + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if max_results is not None: + params["max_results"] = max_results + if relationship_types: + params["relationship_types"] = ",".join(relationship_types) + if include_invalid: + params["include_invalid"] = "true" + + return await self._request.get( + f"/concepts/{concept_id}/hierarchy", params=params + ) + async def ancestors( self, concept_id: int, *, - vocabulary_id: str | None = None, + vocabulary_ids: list[str] | None = None, max_levels: int | None = None, relationship_types: list[str] | None = None, include_paths: bool = False, include_distance: bool = True, - standard_only: bool = False, - include_deprecated: bool = False, + include_invalid: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: """Get concept ancestors.""" params: dict[str, Any] = {"page": page, "page_size": page_size} - if vocabulary_id: - params["vocabulary_id"] = vocabulary_id + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) if max_levels is not None: params["max_levels"] = max_levels if relationship_types: @@ -155,10 +219,8 @@ async def ancestors( params["include_paths"] = "true" if include_distance: params["include_distance"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_deprecated: - params["include_deprecated"] = "true" + if include_invalid: + params["include_invalid"] = "true" return await self._request.get( f"/concepts/{concept_id}/ancestors", params=params @@ -168,40 +230,34 @@ 
async def descendants( self, concept_id: int, *, - vocabulary_id: str | None = None, + vocabulary_ids: list[str] | None = None, max_levels: int = 10, relationship_types: list[str] | None = None, include_distance: bool = True, - standard_only: bool = False, - include_deprecated: bool = False, + include_paths: bool = False, + include_invalid: bool = False, domain_ids: list[str] | None = None, - concept_class_ids: list[str] | None = None, - include_synonyms: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: """Get concept descendants.""" params: dict[str, Any] = { - "max_levels": min(max_levels, 10), + "max_levels": min(max_levels, 20), "page": page, "page_size": page_size, } - if vocabulary_id: - params["vocabulary_id"] = vocabulary_id + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) if relationship_types: params["relationship_types"] = ",".join(relationship_types) if include_distance: params["include_distance"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_deprecated: - params["include_deprecated"] = "true" + if include_paths: + params["include_paths"] = "true" + if include_invalid: + params["include_invalid"] = "true" if domain_ids: params["domain_ids"] = ",".join(domain_ids) - if concept_class_ids: - params["concept_class_ids"] = ",".join(concept_class_ids) - if include_synonyms: - params["include_synonyms"] = "true" return await self._request.get( f"/concepts/{concept_id}/descendants", params=params diff --git a/src/omophub/resources/mappings.py b/src/omophub/resources/mappings.py index b0ee0e2..1b3399e 100644 --- a/src/omophub/resources/mappings.py +++ b/src/omophub/resources/mappings.py @@ -18,98 +18,91 @@ def get( self, concept_id: int, *, - target_vocabularies: list[str] | None = None, - mapping_types: list[str] | None = None, - direction: str = "both", - include_indirect: bool = False, - standard_only: bool = False, - include_mapping_quality: bool = False, - include_synonyms: bool 
= False, - include_context: bool = False, - active_only: bool = True, - sort_by: str | None = None, - sort_order: str | None = None, - page: int = 1, - page_size: int = 50, + target_vocabulary: str | None = None, + include_invalid: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Get mappings for a concept. Args: concept_id: The concept ID - target_vocabularies: Filter by target vocabularies - mapping_types: Filter by mapping types - direction: Mapping direction ("outgoing", "incoming", "both") - include_indirect: Include indirect mappings - standard_only: Only standard concept mappings - include_mapping_quality: Include quality metrics - include_synonyms: Include synonyms - include_context: Include mapping context - active_only: Only active mappings - sort_by: Sort field - sort_order: Sort order - page: Page number - page_size: Results per page + target_vocabulary: Filter to a specific target vocabulary (e.g., "ICD10CM") + include_invalid: Include invalid/deprecated mappings + vocab_release: Specific vocabulary release version (e.g., "2025.1") Returns: - Mappings with summary + Mappings for the concept """ - params: dict[str, Any] = { - "direction": direction, - "page": page, - "page_size": page_size, - } - if target_vocabularies: - params["target_vocabularies"] = ",".join(target_vocabularies) - if mapping_types: - params["mapping_types"] = ",".join(mapping_types) - if include_indirect: - params["include_indirect"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_mapping_quality: - params["include_mapping_quality"] = "true" - if include_synonyms: - params["include_synonyms"] = "true" - if include_context: - params["include_context"] = "true" - if not active_only: - params["active_only"] = "false" - if sort_by: - params["sort_by"] = sort_by - if sort_order: - params["sort_order"] = sort_order - - return self._request.get(f"/concepts/{concept_id}/mappings", params=params) + params: dict[str, Any] = {} + if 
target_vocabulary: + params["target_vocabulary"] = target_vocabulary + if include_invalid: + params["include_invalid"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release + + return self._request.get( + f"/concepts/{concept_id}/mappings", params=params or None + ) def map( self, - source_concepts: list[int], target_vocabulary: str, *, + source_concepts: list[int] | None = None, + source_codes: list[dict[str, str]] | None = None, mapping_type: str | None = None, include_invalid: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Map concepts to a target vocabulary. Args: - source_concepts: List of OMOP concept IDs to map - target_vocabulary: Target vocabulary ID (e.g., "ICD10CM", "SNOMED") - mapping_type: Mapping type (direct, equivalent, broader, narrower) + target_vocabulary: Target vocabulary ID (e.g., "ICD10CM", "SNOMED", "RxNorm") + source_concepts: List of OMOP concept IDs to map. Use this OR source_codes, + not both. + source_codes: List of vocabulary/code pairs to map, e.g., + [{"vocabulary_id": "SNOMED", "concept_code": "387517004"}]. + Use this OR source_concepts, not both. 
+ mapping_type: Mapping type filter (direct, equivalent, broader, narrower) include_invalid: Include invalid mappings + vocab_release: Specific vocabulary release version (e.g., "2025.1") Returns: Mapping results with summary + + Raises: + ValueError: If neither or both source_concepts and source_codes are provided """ + # Validate: exactly one of source_concepts or source_codes required + has_concepts = source_concepts is not None and len(source_concepts) > 0 + has_codes = source_codes is not None and len(source_codes) > 0 + + if not has_concepts and not has_codes: + raise ValueError("Either source_concepts or source_codes is required") + if has_concepts and has_codes: + raise ValueError("Cannot use both source_concepts and source_codes") + body: dict[str, Any] = { - "source_concepts": source_concepts, "target_vocabulary": target_vocabulary, } + + if source_concepts: + body["source_concepts"] = source_concepts + if source_codes: + body["source_codes"] = source_codes if mapping_type: body["mapping_type"] = mapping_type if include_invalid: body["include_invalid"] = True - return self._request.post("/concepts/map", json_data=body) + params: dict[str, Any] = {} + if vocab_release: + params["vocab_release"] = vocab_release + + return self._request.post( + "/concepts/map", json_data=body, params=params or None + ) class AsyncMappings: @@ -122,97 +115,88 @@ async def get( self, concept_id: int, *, - target_vocabularies: list[str] | None = None, - mapping_types: list[str] | None = None, - direction: str = "both", - include_indirect: bool = False, - standard_only: bool = False, - include_mapping_quality: bool = False, - include_synonyms: bool = False, - include_context: bool = False, - active_only: bool = True, - sort_by: str | None = None, - sort_order: str | None = None, - page: int = 1, - page_size: int = 50, + target_vocabulary: str | None = None, + include_invalid: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Get mappings for a concept. 
Args: concept_id: The concept ID - target_vocabularies: Filter by target vocabularies - mapping_types: Filter by mapping types - direction: Mapping direction ("outgoing", "incoming", "both") - include_indirect: Include indirect mappings - standard_only: Only standard concept mappings - include_mapping_quality: Include quality metrics - include_synonyms: Include synonyms - include_context: Include mapping context - active_only: Only active mappings - sort_by: Sort field - sort_order: Sort order - page: Page number - page_size: Results per page + target_vocabulary: Filter to a specific target vocabulary (e.g., "ICD10CM") + include_invalid: Include invalid/deprecated mappings + vocab_release: Specific vocabulary release version (e.g., "2025.1") Returns: - Mappings with summary + Mappings for the concept """ - params: dict[str, Any] = { - "direction": direction, - "page": page, - "page_size": page_size, - } - if target_vocabularies: - params["target_vocabularies"] = ",".join(target_vocabularies) - if mapping_types: - params["mapping_types"] = ",".join(mapping_types) - if include_indirect: - params["include_indirect"] = "true" - if standard_only: - params["standard_only"] = "true" - if include_mapping_quality: - params["include_mapping_quality"] = "true" - if include_synonyms: - params["include_synonyms"] = "true" - if include_context: - params["include_context"] = "true" - if not active_only: - params["active_only"] = "false" - if sort_by: - params["sort_by"] = sort_by - if sort_order: - params["sort_order"] = sort_order + params: dict[str, Any] = {} + if target_vocabulary: + params["target_vocabulary"] = target_vocabulary + if include_invalid: + params["include_invalid"] = "true" + if vocab_release: + params["vocab_release"] = vocab_release return await self._request.get( - f"/concepts/{concept_id}/mappings", params=params + f"/concepts/{concept_id}/mappings", params=params or None ) async def map( self, - source_concepts: list[int], target_vocabulary: str, *, + 
source_concepts: list[int] | None = None, + source_codes: list[dict[str, str]] | None = None, mapping_type: str | None = None, include_invalid: bool = False, + vocab_release: str | None = None, ) -> dict[str, Any]: """Map concepts to a target vocabulary. Args: - source_concepts: List of OMOP concept IDs to map - target_vocabulary: Target vocabulary ID (e.g., "ICD10CM", "SNOMED") - mapping_type: Mapping type (direct, equivalent, broader, narrower) + target_vocabulary: Target vocabulary ID (e.g., "ICD10CM", "SNOMED", "RxNorm") + source_concepts: List of OMOP concept IDs to map. Use this OR source_codes, + not both. + source_codes: List of vocabulary/code pairs to map, e.g., + [{"vocabulary_id": "SNOMED", "concept_code": "387517004"}]. + Use this OR source_concepts, not both. + mapping_type: Mapping type filter (direct, equivalent, broader, narrower) include_invalid: Include invalid mappings + vocab_release: Specific vocabulary release version (e.g., "2025.1") Returns: Mapping results with summary + + Raises: + ValueError: If neither or both source_concepts and source_codes are provided """ + # Validate: exactly one of source_concepts or source_codes required + has_concepts = source_concepts is not None and len(source_concepts) > 0 + has_codes = source_codes is not None and len(source_codes) > 0 + + if not has_concepts and not has_codes: + raise ValueError("Either source_concepts or source_codes is required") + if has_concepts and has_codes: + raise ValueError("Cannot use both source_concepts and source_codes") + body: dict[str, Any] = { - "source_concepts": source_concepts, "target_vocabulary": target_vocabulary, } + + if source_concepts: + body["source_concepts"] = source_concepts + if source_codes: + body["source_codes"] = source_codes if mapping_type: body["mapping_type"] = mapping_type if include_invalid: body["include_invalid"] = True - return await self._request.post("/concepts/map", json_data=body) + params: dict[str, Any] = {} + if vocab_release: + 
params["vocab_release"] = vocab_release + + return await self._request.post( + "/concepts/map", json_data=body, params=params or None + ) diff --git a/src/omophub/resources/relationships.py b/src/omophub/resources/relationships.py index eea18b9..7da8929 100644 --- a/src/omophub/resources/relationships.py +++ b/src/omophub/resources/relationships.py @@ -18,80 +18,63 @@ def get( self, concept_id: int, *, - relationship_type: str | None = None, - target_vocabulary: str | None = None, + relationship_ids: list[str] | None = None, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + standard_only: bool = False, include_invalid: bool = False, + include_reverse: bool = False, page: int = 1, - page_size: int = 50, + page_size: int = 100, ) -> dict[str, Any]: """Get relationships for a concept. Args: concept_id: The concept ID - relationship_type: Filter by relationship type - target_vocabulary: Filter by target vocabulary + relationship_ids: Filter by relationship IDs (e.g., ["Is a", "Maps to"]) + vocabulary_ids: Filter by vocabulary IDs + domain_ids: Filter by domain IDs + standard_only: Only include relationships to standard concepts include_invalid: Include invalid relationships + include_reverse: Include reverse relationships page: Page number - page_size: Results per page + page_size: Results per page (max 1000) Returns: - Relationships with summary + Relationships with pagination metadata """ params: dict[str, Any] = {"page": page, "page_size": page_size} - if relationship_type: - params["relationship_type"] = relationship_type - if target_vocabulary: - params["target_vocabulary"] = target_vocabulary + if relationship_ids: + params["relationship_ids"] = ",".join(relationship_ids) + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if standard_only: + params["standard_only"] = "true" if include_invalid: params["include_invalid"] = "true" + if 
include_reverse: + params["include_reverse"] = "true" return self._request.get(f"/concepts/{concept_id}/relationships", params=params) def types( self, *, - vocabulary_ids: list[str] | None = None, - include_reverse: bool = False, - include_usage_stats: bool = False, - include_examples: bool = False, - category: str | None = None, - is_defining: bool | None = None, - standard_only: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: - """Get available relationship types. + """Get available relationship types from the OMOP CDM. Args: - vocabulary_ids: Filter by vocabularies - include_reverse: Include reverse relationships - include_usage_stats: Include usage statistics - include_examples: Include example concepts - category: Filter by category - is_defining: Filter by defining status - standard_only: Only standard relationships - page: Page number - page_size: Results per page + page: Page number (1-based) + page_size: Results per page (max 500) Returns: - Relationship types with metadata + Relationship types with pagination metadata """ params: dict[str, Any] = {"page": page, "page_size": page_size} - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if include_reverse: - params["include_reverse"] = "true" - if include_usage_stats: - params["include_usage_stats"] = "true" - if include_examples: - params["include_examples"] = "true" - if category: - params["category"] = category - if is_defining is not None: - params["is_defining"] = "true" if is_defining else "false" - if standard_only: - params["standard_only"] = "true" - return self._request.get("/relationships/types", params=params) @@ -105,32 +88,44 @@ async def get( self, concept_id: int, *, - relationship_type: str | None = None, - target_vocabulary: str | None = None, + relationship_ids: list[str] | None = None, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + standard_only: bool = False, include_invalid: bool = False, + 
include_reverse: bool = False, page: int = 1, - page_size: int = 50, + page_size: int = 100, ) -> dict[str, Any]: """Get relationships for a concept. Args: concept_id: The concept ID - relationship_type: Filter by relationship type - target_vocabulary: Filter by target vocabulary + relationship_ids: Filter by relationship IDs (e.g., ["Is a", "Maps to"]) + vocabulary_ids: Filter by vocabulary IDs + domain_ids: Filter by domain IDs + standard_only: Only include relationships to standard concepts include_invalid: Include invalid relationships + include_reverse: Include reverse relationships page: Page number - page_size: Results per page + page_size: Results per page (max 1000) Returns: - Relationships with summary + Relationships with pagination metadata """ params: dict[str, Any] = {"page": page, "page_size": page_size} - if relationship_type: - params["relationship_type"] = relationship_type - if target_vocabulary: - params["target_vocabulary"] = target_vocabulary + if relationship_ids: + params["relationship_ids"] = ",".join(relationship_ids) + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if standard_only: + params["standard_only"] = "true" if include_invalid: params["include_invalid"] = "true" + if include_reverse: + params["include_reverse"] = "true" return await self._request.get( f"/concepts/{concept_id}/relationships", params=params @@ -139,46 +134,17 @@ async def get( async def types( self, *, - vocabulary_ids: list[str] | None = None, - include_reverse: bool = False, - include_usage_stats: bool = False, - include_examples: bool = False, - category: str | None = None, - is_defining: bool | None = None, - standard_only: bool = False, page: int = 1, page_size: int = 100, ) -> dict[str, Any]: - """Get available relationship types. + """Get available relationship types from the OMOP CDM. 
Args: - vocabulary_ids: Filter by vocabularies - include_reverse: Include reverse relationships - include_usage_stats: Include usage statistics - include_examples: Include example concepts - category: Filter by category - is_defining: Filter by defining status - standard_only: Only standard relationships - page: Page number - page_size: Results per page + page: Page number (1-based) + page_size: Results per page (max 500) Returns: - Relationship types with metadata + Relationship types with pagination metadata """ params: dict[str, Any] = {"page": page, "page_size": page_size} - if vocabulary_ids: - params["vocabulary_ids"] = ",".join(vocabulary_ids) - if include_reverse: - params["include_reverse"] = "true" - if include_usage_stats: - params["include_usage_stats"] = "true" - if include_examples: - params["include_examples"] = "true" - if category: - params["category"] = category - if is_defining is not None: - params["is_defining"] = "true" if is_defining else "false" - if standard_only: - params["standard_only"] = "true" - return await self._request.get("/relationships/types", params=params) diff --git a/src/omophub/resources/search.py b/src/omophub/resources/search.py index 5873856..e63eefb 100644 --- a/src/omophub/resources/search.py +++ b/src/omophub/resources/search.py @@ -2,17 +2,28 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, TypedDict +from typing import TYPE_CHECKING, Any, Literal, TypedDict -from .._pagination import DEFAULT_PAGE_SIZE, paginate_sync +from .._pagination import DEFAULT_PAGE_SIZE, paginate_async, paginate_sync if TYPE_CHECKING: - from collections.abc import Iterator + from collections.abc import AsyncIterator, Iterator from .._request import AsyncRequest, Request from ..types.common import PaginationMeta from ..types.concept import Concept - from ..types.search import SearchResult, Suggestion + from ..types.search import ( + BulkSearchDefaults, + BulkSearchInput, + BulkSearchResponse, + 
BulkSemanticSearchDefaults, + BulkSemanticSearchInput, + BulkSemanticSearchResponse, + SearchResult, + SemanticSearchResult, + SimilarSearchResult, + Suggestion, + ) class BasicSearchParams(TypedDict, total=False): @@ -37,15 +48,15 @@ class AdvancedSearchParams(TypedDict, total=False): """Parameters for advanced search.""" query: str - vocabularies: list[str] - domains: list[str] - concept_classes: list[str] + vocabulary_ids: list[str] + domain_ids: list[str] + concept_class_ids: list[str] standard_concepts_only: bool include_invalid: bool relationship_filters: list[dict[str, Any]] date_range: dict[str, str] - limit: int - offset: int + page: int + page_size: int class Search: @@ -125,8 +136,15 @@ def basic_iter( *, vocabulary_ids: list[str] | None = None, domain_ids: list[str] | None = None, + concept_class_ids: list[str] | None = None, + standard_concept: str | None = None, + include_synonyms: bool = False, + include_invalid: bool = False, + min_score: float | None = None, + exact_match: bool = False, page_size: int = DEFAULT_PAGE_SIZE, - **kwargs: Any, + sort_by: str | None = None, + sort_order: str | None = None, ) -> Iterator[Concept]: """Iterate through all search results with auto-pagination. 
@@ -134,8 +152,15 @@ def basic_iter( query: Search query string vocabulary_ids: Filter by vocabulary IDs domain_ids: Filter by domain IDs + concept_class_ids: Filter by concept class IDs + standard_concept: Filter by standard concept ("S", "C", or None) + include_synonyms: Search in synonyms + include_invalid: Include invalid concepts + min_score: Minimum relevance score + exact_match: Require exact match page_size: Results per page - **kwargs: Additional search parameters + sort_by: Sort field + sort_order: Sort order ("asc" or "desc") Yields: Individual concepts from all pages @@ -144,22 +169,40 @@ def basic_iter( def fetch_page( page: int, size: int ) -> tuple[list[Concept], PaginationMeta | None]: - result = self.basic( - query, - vocabulary_ids=vocabulary_ids, - domain_ids=domain_ids, - page=page, - page_size=size, - **kwargs, - ) - concepts = ( - result.get("concepts", result) if isinstance(result, dict) else result - ) - meta = ( - result.get("meta", {}).get("pagination") - if isinstance(result, dict) - else None - ) + # Build params manually to use get_raw() for full response with meta + params: dict[str, Any] = { + "query": query, + "page": page, + "page_size": size, + } + if vocabulary_ids: + params["vocabulary_ids"] = ",".join(vocabulary_ids) + if domain_ids: + params["domain_ids"] = ",".join(domain_ids) + if concept_class_ids: + params["concept_class_ids"] = ",".join(concept_class_ids) + if standard_concept: + params["standard_concept"] = standard_concept + if include_synonyms: + params["include_synonyms"] = "true" + if include_invalid: + params["include_invalid"] = "true" + if min_score is not None: + params["min_score"] = min_score + if exact_match: + params["exact_match"] = "true" + if sort_by: + params["sort_by"] = sort_by + if sort_order: + params["sort_order"] = sort_order + + # Use get_raw() to preserve pagination metadata + result = self._request.get_raw("/search/concepts", params=params) + + # Extract concepts from 'data' field (may be list or 
dict with 'concepts') + data = result.get("data", []) + concepts = data.get("concepts", data) if isinstance(data, dict) else data + meta = result.get("meta", {}).get("pagination") return concepts, meta yield from paginate_sync(fetch_page, page_size) @@ -168,50 +211,50 @@ def advanced( self, query: str, *, - vocabularies: list[str] | None = None, - domains: list[str] | None = None, - concept_classes: list[str] | None = None, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + concept_class_ids: list[str] | None = None, standard_concepts_only: bool = False, include_invalid: bool = False, relationship_filters: list[dict[str, Any]] | None = None, - limit: int = 20, - offset: int = 0, + page: int = 1, + page_size: int = 20, ) -> SearchResult: """Advanced concept search with facets. Args: query: Search query string - vocabularies: Filter by vocabularies - domains: Filter by domains - concept_classes: Filter by concept classes + vocabulary_ids: Filter by vocabulary IDs + domain_ids: Filter by domain IDs + concept_class_ids: Filter by concept class IDs standard_concepts_only: Only return standard concepts include_invalid: Include invalid concepts relationship_filters: Relationship-based filters - limit: Maximum results - offset: Result offset + page: Page number (1-based) + page_size: Results per page Returns: Search results with facets and metadata """ body: dict[str, Any] = {"query": query} - if vocabularies: - body["vocabularies"] = vocabularies - if domains: - body["domains"] = domains - if concept_classes: - body["concept_classes"] = concept_classes + if vocabulary_ids: + body["vocabulary_ids"] = vocabulary_ids + if domain_ids: + body["domain_ids"] = domain_ids + if concept_class_ids: + body["concept_class_ids"] = concept_class_ids if standard_concepts_only: body["standard_concepts_only"] = True if include_invalid: body["include_invalid"] = True if relationship_filters: body["relationship_filters"] = relationship_filters - if limit != 
20: - body["limit"] = limit - if offset > 0: - body["offset"] = offset + if page != 1: + body["page"] = page + if page_size != 20: + body["page_size"] = page_size - return self._request.post("/concepts/search/advanced", json_data=body) + return self._request.post("/search/advanced", json_data=body) def autocomplete( self, @@ -219,7 +262,7 @@ def autocomplete( *, vocabulary_ids: list[str] | None = None, domains: list[str] | None = None, - max_suggestions: int = 10, + page_size: int = 10, ) -> list[Suggestion]: """Get autocomplete suggestions. @@ -227,12 +270,12 @@ def autocomplete( query: Partial query string vocabulary_ids: Filter by vocabulary IDs domains: Filter by domains - max_suggestions: Maximum suggestions + page_size: Maximum suggestions to return Returns: Autocomplete suggestions """ - params: dict[str, Any] = {"query": query, "max_suggestions": max_suggestions} + params: dict[str, Any] = {"query": query, "page_size": page_size} if vocabulary_ids: params["vocabulary_ids"] = ",".join(vocabulary_ids) if domains: @@ -240,6 +283,250 @@ def autocomplete( return self._request.get("/search/suggest", params=params) + def semantic( + self, + query: str, + *, + vocabulary_ids: list[str] | None = None, + domain_ids: list[str] | None = None, + standard_concept: Literal["S", "C"] | None = None, + concept_class_id: str | None = None, + threshold: float | None = None, + page: int = 1, + page_size: int = 20, + ) -> dict[str, Any]: + """Semantic concept search using neural embeddings. 
def semantic_iter(
    self,
    query: str,
    *,
    vocabulary_ids: list[str] | None = None,
    domain_ids: list[str] | None = None,
    standard_concept: Literal["S", "C"] | None = None,
    concept_class_id: str | None = None,
    threshold: float | None = None,
    page_size: int = DEFAULT_PAGE_SIZE,
) -> Iterator[SemanticSearchResult]:
    """Iterate through all semantic search results with auto-pagination.

    Args:
        query: Natural language search query
        vocabulary_ids: Filter by vocabulary IDs
        domain_ids: Filter by domain IDs
        standard_concept: Filter by standard concept flag ('S' or 'C')
        concept_class_id: Filter by concept class
        threshold: Minimum similarity threshold (0.0-1.0)
        page_size: Results per page

    Yields:
        Individual semantic search results from all pages
    """

    def fetch_page(
        page: int, size: int
    ) -> tuple[list[SemanticSearchResult], PaginationMeta | None]:
        params: dict[str, Any] = {
            "query": query,
            "page": page,
            "page_size": size,
        }
        if vocabulary_ids:
            params["vocabulary_ids"] = ",".join(vocabulary_ids)
        if domain_ids:
            params["domain_ids"] = ",".join(domain_ids)
        if standard_concept:
            params["standard_concept"] = standard_concept
        if concept_class_id:
            params["concept_class_id"] = concept_class_id
        if threshold is not None:
            params["threshold"] = threshold

        result = self._request.get_raw("/concepts/semantic-search", params=params)

        data = result.get("data", [])
        # Envelope may be {"data": {"results": [...]}} or {"data": [...]}.
        results = data.get("results", data) if isinstance(data, dict) else data
        # `or {}` guards against an explicit "meta": null in the payload;
        # .get("meta", {}) only covers a *missing* key and would otherwise
        # raise AttributeError on None.
        meta = (result.get("meta") or {}).get("pagination")
        return results, meta

    yield from paginate_sync(fetch_page, page_size)

def bulk_basic(
    self,
    searches: list[BulkSearchInput],
    *,
    defaults: BulkSearchDefaults | None = None,
) -> BulkSearchResponse:
    """Execute multiple lexical searches in a single request.

    Sends up to 50 search queries in one API call. Each search can have
    its own filters, or you can set shared defaults.

    Args:
        searches: List of search inputs, each with a unique ``search_id``
            and ``query``. Max 50 items.
        defaults: Default filters applied to all searches. Individual
            search-level values override defaults.

    Returns:
        Bulk results with per-search status, results, and timing.

    Example::

        results = client.search.bulk_basic([
            {"search_id": "q1", "query": "diabetes"},
            {"search_id": "q2", "query": "hypertension"},
        ], defaults={"vocabulary_ids": ["SNOMED"], "page_size": 5})

        for item in results["results"]:
            print(item["search_id"], len(item["results"]))
    """
    body: dict[str, Any] = {"searches": searches}
    if defaults:
        body["defaults"] = defaults
    return self._request.post("/search/bulk", json_data=body)

def bulk_semantic(
    self,
    searches: list[BulkSemanticSearchInput],
    *,
    defaults: BulkSemanticSearchDefaults | None = None,
) -> BulkSemanticSearchResponse:
    """Execute multiple semantic searches in a single request.

    Sends up to 25 natural-language queries in one API call using neural
    embeddings. Each search can have its own filters and threshold.

    Args:
        searches: List of search inputs, each with a unique ``search_id``
            and ``query`` (1-500 chars). Max 25 items.
        defaults: Default filters applied to all searches. Individual
            search-level values override defaults.

    Returns:
        Bulk results with per-search status, similarity scores, and
        optional query enhancements.

    Example::

        results = client.search.bulk_semantic([
            {"search_id": "s1", "query": "heart failure treatment"},
            {"search_id": "s2", "query": "type 2 diabetes medication"},
        ], defaults={"threshold": 0.8, "page_size": 10})

        for item in results["results"]:
            print(item["search_id"], item.get("result_count", 0))
    """
    body: dict[str, Any] = {"searches": searches}
    if defaults:
        body["defaults"] = defaults
    return self._request.post("/search/semantic-bulk", json_data=body)

def similar(
    self,
    *,
    concept_id: int | None = None,
    concept_name: str | None = None,
    query: str | None = None,
    algorithm: Literal["semantic", "lexical", "hybrid"] = "hybrid",
    similarity_threshold: float = 0.7,
    page_size: int = 20,
    vocabulary_ids: list[str] | None = None,
    domain_ids: list[str] | None = None,
    standard_concept: Literal["S", "C", "N"] | None = None,
    include_invalid: bool | None = None,
    include_scores: bool | None = None,
    include_explanations: bool | None = None,
) -> SimilarSearchResult:
    """Find concepts similar to a reference concept or query.

    Must provide exactly one of: concept_id, concept_name, or query.

    Args:
        concept_id: Find concepts similar to this concept ID
        concept_name: Find concepts similar to this name
        query: Natural language query for semantic similarity
        algorithm: 'semantic' (neural), 'lexical' (text), or 'hybrid' (both)
        similarity_threshold: Minimum similarity (0.0-1.0)
        page_size: Max results to return (max 1000)
        vocabulary_ids: Filter by vocabulary IDs
        domain_ids: Filter by domain IDs
        standard_concept: Filter by standard concept flag
        include_invalid: Include invalid/deprecated concepts
        include_scores: Include detailed similarity scores
        include_explanations: Include similarity explanations

    Returns:
        Similar concepts with similarity scores and metadata

    Raises:
        ValueError: If not exactly one of concept_id, concept_name, or query
            is provided.

    Note:
        When algorithm='semantic', only a single vocabulary/domain filter is
        supported.
    """
    # Validate exactly one input source provided
    input_count = sum(x is not None for x in [concept_id, concept_name, query])
    if input_count != 1:
        raise ValueError(
            "Exactly one of concept_id, concept_name, or query must be provided"
        )

    body: dict[str, Any] = {
        "algorithm": algorithm,
        "similarity_threshold": similarity_threshold,
    }
    if concept_id is not None:
        body["concept_id"] = concept_id
    if concept_name is not None:
        body["concept_name"] = concept_name
    if query is not None:
        body["query"] = query
    # Only send page_size when it differs from the server default.
    if page_size != 20:
        body["page_size"] = page_size
    if vocabulary_ids:
        body["vocabulary_ids"] = vocabulary_ids
    if domain_ids:
        body["domain_ids"] = domain_ids
    if standard_concept:
        body["standard_concept"] = standard_concept
    if include_invalid is not None:
        body["include_invalid"] = include_invalid
    if include_scores is not None:
        body["include_scores"] = include_scores
    if include_explanations is not None:
        body["include_explanations"] = include_explanations

    return self._request.post("/search/similar", json_data=body)
async def autocomplete(
    self,
    query: str,
    *,
    vocabulary_ids: list[str] | None = None,
    domains: list[str] | None = None,
    page_size: int = 10,
) -> list[Suggestion]:
    """Get autocomplete suggestions."""
    params: dict[str, Any] = {"query": query, "page_size": page_size}
    if vocabulary_ids:
        params["vocabulary_ids"] = ",".join(vocabulary_ids)
    if domains:
        params["domains"] = ",".join(domains)

    return await self._request.get("/search/suggest", params=params)

async def semantic(
    self,
    query: str,
    *,
    vocabulary_ids: list[str] | None = None,
    domain_ids: list[str] | None = None,
    standard_concept: Literal["S", "C"] | None = None,
    concept_class_id: str | None = None,
    threshold: float | None = None,
    page: int = 1,
    page_size: int = 20,
) -> dict[str, Any]:
    """Semantic concept search using neural embeddings."""
    params: dict[str, Any] = {"query": query, "page": page, "page_size": page_size}
    if vocabulary_ids:
        params["vocabulary_ids"] = ",".join(vocabulary_ids)
    if domain_ids:
        params["domain_ids"] = ",".join(domain_ids)
    if standard_concept:
        params["standard_concept"] = standard_concept
    if concept_class_id:
        params["concept_class_id"] = concept_class_id
    if threshold is not None:
        params["threshold"] = threshold

    return await self._request.get("/concepts/semantic-search", params=params)

async def semantic_iter(
    self,
    query: str,
    *,
    vocabulary_ids: list[str] | None = None,
    domain_ids: list[str] | None = None,
    standard_concept: Literal["S", "C"] | None = None,
    concept_class_id: str | None = None,
    threshold: float | None = None,
    page_size: int = DEFAULT_PAGE_SIZE,
) -> AsyncIterator[SemanticSearchResult]:
    """Iterate through all semantic search results with auto-pagination."""

    async def fetch_page(
        page: int, size: int
    ) -> tuple[list[SemanticSearchResult], PaginationMeta | None]:
        params: dict[str, Any] = {
            "query": query,
            "page": page,
            "page_size": size,
        }
        if vocabulary_ids:
            params["vocabulary_ids"] = ",".join(vocabulary_ids)
        if domain_ids:
            params["domain_ids"] = ",".join(domain_ids)
        if standard_concept:
            params["standard_concept"] = standard_concept
        if concept_class_id:
            params["concept_class_id"] = concept_class_id
        if threshold is not None:
            params["threshold"] = threshold

        result = await self._request.get_raw(
            "/concepts/semantic-search", params=params
        )

        data = result.get("data", [])
        # Envelope may be {"data": {"results": [...]}} or {"data": [...]}.
        results = data.get("results", data) if isinstance(data, dict) else data
        # `or {}` guards against an explicit "meta": null in the payload;
        # .get("meta", {}) only covers a *missing* key and would otherwise
        # raise AttributeError on None.
        meta = (result.get("meta") or {}).get("pagination")
        return results, meta

    item: SemanticSearchResult
    async for item in paginate_async(fetch_page, page_size):
        yield item

async def bulk_basic(
    self,
    searches: list[BulkSearchInput],
    *,
    defaults: BulkSearchDefaults | None = None,
) -> BulkSearchResponse:
    """Execute multiple lexical searches in a single request.

    Args:
        searches: List of search inputs (max 50).
        defaults: Default filters for all searches.

    Returns:
        Bulk results with per-search status and results.
    """
    body: dict[str, Any] = {"searches": searches}
    if defaults:
        body["defaults"] = defaults
    return await self._request.post("/search/bulk", json_data=body)

async def bulk_semantic(
    self,
    searches: list[BulkSemanticSearchInput],
    *,
    defaults: BulkSemanticSearchDefaults | None = None,
) -> BulkSemanticSearchResponse:
    """Execute multiple semantic searches in a single request.

    Args:
        searches: List of search inputs (max 25).
        defaults: Default filters for all searches.

    Returns:
        Bulk results with per-search status and similarity scores.
    """
    body: dict[str, Any] = {"searches": searches}
    if defaults:
        body["defaults"] = defaults
    return await self._request.post("/search/semantic-bulk", json_data=body)

async def similar(
    self,
    *,
    concept_id: int | None = None,
    concept_name: str | None = None,
    query: str | None = None,
    algorithm: Literal["semantic", "lexical", "hybrid"] = "hybrid",
    similarity_threshold: float = 0.7,
    page_size: int = 20,
    vocabulary_ids: list[str] | None = None,
    domain_ids: list[str] | None = None,
    standard_concept: Literal["S", "C", "N"] | None = None,
    include_invalid: bool | None = None,
    include_scores: bool | None = None,
    include_explanations: bool | None = None,
) -> SimilarSearchResult:
    """Find concepts similar to a reference concept or query.

    Must provide exactly one of: concept_id, concept_name, or query.

    Raises:
        ValueError: If not exactly one of concept_id, concept_name, or query
            is provided.
    """
    # Validate exactly one input source provided
    input_count = sum(x is not None for x in [concept_id, concept_name, query])
    if input_count != 1:
        raise ValueError(
            "Exactly one of concept_id, concept_name, or query must be provided"
        )

    body: dict[str, Any] = {
        "algorithm": algorithm,
        "similarity_threshold": similarity_threshold,
    }
    if concept_id is not None:
        body["concept_id"] = concept_id
    if concept_name is not None:
        body["concept_name"] = concept_name
    if query is not None:
        body["query"] = query
    # Only send page_size when it differs from the server default.
    if page_size != 20:
        body["page_size"] = page_size
    if vocabulary_ids:
        body["vocabulary_ids"] = vocabulary_ids
    if domain_ids:
        body["domain_ids"] = domain_ids
    if standard_concept:
        body["standard_concept"] = standard_concept
    if include_invalid is not None:
        body["include_invalid"] = include_invalid
    if include_scores is not None:
        body["include_scores"] = include_scores
    if include_explanations is not None:
        body["include_explanations"] = include_explanations

    return await self._request.post("/search/similar", json_data=body)
def stats(self, vocabulary_id: str) -> VocabularyStats:
    """Fetch summary statistics for a single vocabulary.

    Args:
        vocabulary_id: The vocabulary ID

    Returns:
        Vocabulary statistics
    """
    return self._request.get(f"/vocabularies/{vocabulary_id}/stats")

def domain_stats(self, vocabulary_id: str, domain_id: str) -> dict[str, Any]:
    """Fetch statistics for one domain inside a vocabulary.

    Args:
        vocabulary_id: The vocabulary ID (e.g., "SNOMED", "ICD10CM")
        domain_id: The domain ID (e.g., "Condition", "Drug", "Procedure")

    Returns:
        Domain statistics including concept counts and class breakdown
    """
    return self._request.get(
        f"/vocabularies/{vocabulary_id}/stats/domains/{domain_id}"
    )

def domains(self) -> dict[str, Any]:
    """List every standard OHDSI domain.

    Returns:
        All available domains with domain_id, domain_name, and description
    """
    return self._request.get("/vocabularies/domains")

def concept_classes(self) -> dict[str, Any]:
    """List every concept class.

    Returns:
        All available concept classes with concept_class_id,
        concept_class_name, and concept_class_concept_id
    """
    return self._request.get("/vocabularies/concept-classes")

def concepts(
    self,
    vocabulary_id: str,
    *,
    search: str | None = None,
    standard_concept: str = "all",
    include_invalid: bool = False,
    include_relationships: bool = False,
    include_synonyms: bool = False,
    sort_by: str = "name",
    sort_order: str = "asc",
    page: int = 1,
    page_size: int = 20,
) -> dict[str, Any]:
    """Page through the concepts that belong to a vocabulary.

    Args:
        vocabulary_id: The vocabulary ID
        search: Search term to filter concepts by name or code
        standard_concept: Filter by standard concept status ('S', 'C', 'all')
        include_invalid: Include invalid or deprecated concepts
        include_relationships: Include concept relationships
        include_synonyms: Include concept synonyms
        sort_by: Sort field ('name', 'concept_id', 'concept_code')
        sort_order: Sort order ('asc' or 'desc')
        page: Page number
        page_size: Results per page (max 1000)

    Returns:
        Paginated concepts
    """
    query_params: dict[str, Any] = {
        "page": page,
        "page_size": page_size,
        "standard_concept": standard_concept,
        "sort_by": sort_by,
        "sort_order": sort_order,
    }
    if search:
        query_params["search"] = search
    # Boolean flags are sent as the literal string "true" and omitted
    # entirely when false, matching the API's query-string conventions.
    if include_invalid:
        query_params["include_invalid"] = "true"
    if include_relationships:
        query_params["include_relationships"] = "true"
    if include_synonyms:
        query_params["include_synonyms"] = "true"

    return self._request.get(
        f"/vocabularies/{vocabulary_id}/concepts", params=query_params
    )

async def get(self, vocabulary_id: str) -> Vocabulary:
    """Get vocabulary details."""
    return await self._request.get(f"/vocabularies/{vocabulary_id}")

async def stats(self, vocabulary_id: str) -> VocabularyStats:
    """Get vocabulary statistics."""
    return await self._request.get(f"/vocabularies/{vocabulary_id}/stats")

async def domain_stats(self, vocabulary_id: str, domain_id: str) -> dict[str, Any]:
    """Get statistics for a specific domain within a vocabulary."""
    return await self._request.get(
        f"/vocabularies/{vocabulary_id}/stats/domains/{domain_id}"
    )
async def concept_classes(self) -> dict[str, Any]:
    """Get all concept classes."""
    return await self._request.get("/vocabularies/concept-classes")

async def concepts(
    self,
    vocabulary_id: str,
    *,
    search: str | None = None,
    standard_concept: str = "all",
    include_invalid: bool = False,
    include_relationships: bool = False,
    include_synonyms: bool = False,
    sort_by: str = "name",
    sort_order: str = "asc",
    page: int = 1,
    page_size: int = 20,
) -> dict[str, Any]:
    """Get concepts in a vocabulary."""
    query_params: dict[str, Any] = {
        "page": page,
        "page_size": page_size,
        "standard_concept": standard_concept,
        "sort_by": sort_by,
        "sort_order": sort_order,
    }
    if search:
        query_params["search"] = search
    # Boolean flags serialize as the string "true"; false flags are omitted.
    if include_invalid:
        query_params["include_invalid"] = "true"
    if include_relationships:
        query_params["include_relationships"] = "true"
    if include_synonyms:
        query_params["include_synonyms"] = "true"

    return await self._request.get(
        f"/vocabularies/{vocabulary_id}/concepts", params=query_params
    )
RelationshipType, ) from .search import ( + BulkSearchDefaults, + BulkSearchInput, + BulkSearchResponse, + BulkSearchResultItem, + BulkSemanticSearchDefaults, + BulkSemanticSearchInput, + BulkSemanticSearchResponse, + BulkSemanticSearchResultItem, + QueryEnhancement, SearchFacet, SearchFacets, SearchMetadata, SearchResult, + SemanticSearchMeta, + SemanticSearchResult, + SimilarConcept, + SimilarSearchMetadata, + SimilarSearchResult, Suggestion, ) from .vocabulary import ( @@ -48,45 +71,58 @@ ) __all__ = [ - # Common "APIResponse", - # Hierarchy "Ancestor", "BatchConceptResult", - # Concept + "BulkSearchDefaults", + "BulkSearchInput", + "BulkSearchResponse", + "BulkSearchResultItem", + "BulkSemanticSearchDefaults", + "BulkSemanticSearchInput", + "BulkSemanticSearchResponse", + "BulkSemanticSearchResultItem", "Concept", "ConceptSummary", "Descendant", - # Domain "Domain", "DomainCategory", "DomainStats", "DomainSummary", "ErrorDetail", "ErrorResponse", + "FhirBatchResult", + "FhirBatchSummary", + "FhirCodeableConceptResult", + "FhirResolution", + "FhirResolveResult", "HierarchyPath", "HierarchySummary", - # Mapping "Mapping", "MappingContext", "MappingQuality", "MappingSummary", "PaginationMeta", "PaginationParams", + "QueryEnhancement", + "RecommendedConceptOutput", "RelatedConcept", - # Relationship "Relationship", "RelationshipSummary", "RelationshipType", + "ResolvedConcept", "ResponseMeta", - # Search "SearchFacet", "SearchFacets", "SearchMetadata", "SearchResult", + "SemanticSearchMeta", + "SemanticSearchResult", + "SimilarConcept", + "SimilarSearchMetadata", + "SimilarSearchResult", "Suggestion", "Synonym", - # Vocabulary "Vocabulary", "VocabularyDomain", "VocabularyStats", diff --git a/src/omophub/types/fhir.py b/src/omophub/types/fhir.py new file mode 100644 index 0000000..26f60c6 --- /dev/null +++ b/src/omophub/types/fhir.py @@ -0,0 +1,80 @@ +"""FHIR Resolver type definitions.""" + +from __future__ import annotations + +from typing import Any, TypedDict + 
+from typing_extensions import NotRequired + + +class ResolvedConcept(TypedDict): + """Concept shape returned by the FHIR resolver.""" + + concept_id: int + concept_name: str + concept_code: str + vocabulary_id: str + domain_id: str + concept_class_id: str + standard_concept: str | None + + +class RecommendedConceptOutput(TypedDict): + """Phoebe recommendation returned when include_recommendations is true.""" + + concept_id: int + concept_name: str + vocabulary_id: str + domain_id: str + concept_class_id: str + standard_concept: str | None + relationship_id: str + + +class FhirResolution(TypedDict): + """The ``resolution`` block inside a single-resolve response.""" + + vocabulary_id: str | None + source_concept: ResolvedConcept + standard_concept: ResolvedConcept + mapping_type: str + target_table: str | None + domain_resource_alignment: str + relationship_id: NotRequired[str] + similarity_score: NotRequired[float] + alignment_note: NotRequired[str] + mapping_quality: NotRequired[str] + quality_note: NotRequired[str] + alternative_standard_concepts: NotRequired[list[ResolvedConcept]] + recommendations: NotRequired[list[RecommendedConceptOutput]] + + +class FhirResolveResult(TypedDict): + """Response from ``POST /v1/fhir/resolve``.""" + + input: dict[str, Any] + resolution: FhirResolution + + +class FhirBatchSummary(TypedDict): + """Summary block inside a batch-resolve response.""" + + total: int + resolved: int + failed: int + + +class FhirBatchResult(TypedDict): + """Response from ``POST /v1/fhir/resolve/batch``.""" + + results: list[dict[str, Any]] + summary: FhirBatchSummary + + +class FhirCodeableConceptResult(TypedDict): + """Response from ``POST /v1/fhir/resolve/codeable-concept``.""" + + input: dict[str, Any] + best_match: FhirResolveResult | None + alternatives: list[FhirResolveResult] + unresolved: list[dict[str, Any]] diff --git a/src/omophub/types/search.py b/src/omophub/types/search.py index 0e6e768..d761b08 100644 --- a/src/omophub/types/search.py +++ 
b/src/omophub/types/search.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import TYPE_CHECKING, TypedDict +from typing import TYPE_CHECKING, Any, TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, Required if TYPE_CHECKING: from .concept import Concept @@ -21,6 +21,168 @@ class Suggestion(TypedDict): vocabulary_id: NotRequired[str] +class SemanticSearchResult(TypedDict): + """Result from semantic concept search.""" + + concept_id: int + concept_name: str + domain_id: str + vocabulary_id: str + concept_class_id: str + standard_concept: str | None + concept_code: str + similarity_score: float + matched_text: str + + +class SemanticSearchMeta(TypedDict, total=False): + """Metadata for semantic search.""" + + query: str + total_results: int + filters_applied: dict[str, Any] + + +class SimilarConcept(TypedDict): + """A concept similar to the query concept.""" + + concept_id: int + concept_name: str + domain_id: str + vocabulary_id: str + concept_class_id: str + standard_concept: str | None + concept_code: str + similarity_score: float + matched_text: NotRequired[str] + similarity_explanation: NotRequired[str] + + +class SimilarSearchMetadata(TypedDict, total=False): + """Metadata for similar concept search.""" + + original_query: str + algorithm_used: str + similarity_threshold: float + total_candidates: int + results_returned: int + processing_time_ms: int + embedding_latency_ms: int + + +class SimilarSearchResult(TypedDict): + """Result from similar concept search.""" + + similar_concepts: list[SimilarConcept] + search_metadata: SimilarSearchMetadata + + +# --------------------------------------------------------------------------- +# Bulk search types +# --------------------------------------------------------------------------- + + +class BulkSearchInput(TypedDict, total=False): + """Input for a single query in a bulk lexical search.""" + + search_id: Required[str] + query: Required[str] + 
vocabulary_ids: list[str] + domain_ids: list[str] + concept_class_ids: list[str] + standard_concept: str + include_invalid: bool + page_size: int + + +class BulkSearchDefaults(TypedDict, total=False): + """Default filters applied to all searches in a bulk lexical request.""" + + vocabulary_ids: list[str] + domain_ids: list[str] + concept_class_ids: list[str] + standard_concept: str + include_invalid: bool + page_size: int + + +class BulkSearchResultItem(TypedDict): + """Result for a single query in a bulk lexical search.""" + + search_id: str + query: str + results: list[dict[str, Any]] + status: str # "completed" | "failed" + error: NotRequired[str] + duration: NotRequired[int] + + +class BulkSearchResponse(TypedDict): + """Response from bulk lexical search.""" + + results: list[BulkSearchResultItem] + total_searches: int + completed_searches: int + failed_searches: int + + +class BulkSemanticSearchInput(TypedDict, total=False): + """Input for a single query in a bulk semantic search.""" + + search_id: Required[str] + query: Required[str] # 1-500 characters + page_size: int + threshold: float + vocabulary_ids: list[str] + domain_ids: list[str] + standard_concept: str + concept_class_id: str + + +class BulkSemanticSearchDefaults(TypedDict, total=False): + """Default filters applied to all searches in a bulk semantic request.""" + + page_size: int + threshold: float + vocabulary_ids: list[str] + domain_ids: list[str] + standard_concept: str + concept_class_id: str + + +class QueryEnhancement(TypedDict, total=False): + """Query enhancement info from semantic search.""" + + original_query: str + enhanced_query: str + abbreviations_expanded: list[str] + misspellings_corrected: list[str] + + +class BulkSemanticSearchResultItem(TypedDict): + """Result for a single query in a bulk semantic search.""" + + search_id: str + query: str + results: list[dict[str, Any]] + status: str # "completed" | "failed" + error: NotRequired[str] + similarity_threshold: NotRequired[float] + 
result_count: NotRequired[int] + duration: NotRequired[int] + query_enhancement: NotRequired[QueryEnhancement] + + +class BulkSemanticSearchResponse(TypedDict): + """Response from bulk semantic search.""" + + results: list[BulkSemanticSearchResultItem] + total_searches: int + completed_count: int + failed_count: int + total_duration: NotRequired[int] + + class SearchFacet(TypedDict): """Search facet with count.""" diff --git a/src/omophub/types/vocabulary.py b/src/omophub/types/vocabulary.py index 2686c5f..235d707 100644 --- a/src/omophub/types/vocabulary.py +++ b/src/omophub/types/vocabulary.py @@ -24,15 +24,28 @@ class VocabularyDomain(TypedDict): classification_count: NotRequired[int] +class DomainDistribution(TypedDict): + """Domain distribution within vocabulary statistics.""" + + domain_id: str + domain_name: str + concept_count: int + + class VocabularyStats(TypedDict): - """Vocabulary statistics.""" + """Vocabulary statistics from /vocabularies/{vocabulary_id}/stats endpoint.""" + vocabulary_id: str + vocabulary_name: str total_concepts: int standard_concepts: NotRequired[int] classification_concepts: NotRequired[int] invalid_concepts: NotRequired[int] - relationships_count: NotRequired[int] - synonyms_count: NotRequired[int] + active_concepts: NotRequired[int] + valid_start_date: NotRequired[str] + valid_end_date: NotRequired[str] + last_updated: NotRequired[str] + domain_distribution: NotRequired[list[DomainDistribution]] class Vocabulary(TypedDict): diff --git a/tests/conftest.py b/tests/conftest.py index 2af7506..a48935c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,7 @@ from __future__ import annotations import os +import time from typing import Any import pytest @@ -85,7 +86,7 @@ def mock_api_response(mock_concept: dict[str, Any]) -> dict[str, Any]: "meta": { "request_id": "req_test123", "timestamp": "2024-12-01T00:00:00Z", - "vocab_release": "2024.4", + "vocab_release": "2024.2", }, } @@ -135,6 +136,20 @@ def mock_error_response() 
from collections.abc import Iterator


@pytest.fixture(autouse=True)
def rate_limit_delay(request: pytest.FixtureRequest) -> Iterator[None]:
    """Pause after integration tests to avoid tripping API rate limits.

    The sleep runs *after* the test (code below ``yield``), and only when
    the test is collected under the ``integration`` keyword. Annotated as
    ``Iterator[None]`` because a yield fixture is a generator function;
    ``-> None`` is incorrect under strict type checking.
    """
    yield
    # Only delay for integration tests
    if "integration" in request.keywords:
        # Bulk endpoints consume more rate limit budget
        if "bulk" in request.node.name:
            time.sleep(5)
        else:
            time.sleep(2)
integration_client.concepts.suggest("diabetes", page_size=5) # Result is a list of concept objects with concept_name field suggestions = extract_data(result, "suggestions") @@ -81,9 +81,9 @@ def test_suggest_concepts_with_filters(self, integration_client: OMOPHub) -> Non """Get concept suggestions with vocabulary filter.""" result = integration_client.concepts.suggest( "aspirin", - vocabulary="RxNorm", - domain="Drug", - limit=10, + vocabulary_ids=["RxNorm"], + domain_ids=["Drug"], + page_size=10, ) # Should get at least one result or empty list @@ -94,7 +94,7 @@ def test_get_related_concepts(self, integration_client: OMOPHub) -> None: """Get related concepts.""" result = integration_client.concepts.related( DIABETES_CONCEPT_ID, - max_results=10, + page_size=10, ) # Should have related_concepts key @@ -105,7 +105,6 @@ def test_get_concept_relationships(self, integration_client: OMOPHub) -> None: """Get concept relationships.""" result = integration_client.concepts.relationships( DIABETES_CONCEPT_ID, - page_size=20, ) # Should have relationships diff --git a/tests/integration/test_domains.py b/tests/integration/test_domains.py index e2c7d10..5cc276a 100644 --- a/tests/integration/test_domains.py +++ b/tests/integration/test_domains.py @@ -27,30 +27,17 @@ def test_list_domains(self, integration_client: OMOPHub) -> None: assert "Drug" in domain_ids assert "Procedure" in domain_ids - def test_list_domains_with_options(self, integration_client: OMOPHub) -> None: - """List domains with statistics and examples.""" - result = integration_client.domains.list( - include_concept_counts=True, - include_statistics=True, - ) - - domains = result.get("domains", []) - assert len(domains) > 0 - # Verify domains are returned with expected structure - assert all("domain_id" in d for d in domains) - - def test_list_domains_with_vocabulary_filter( - self, integration_client: OMOPHub - ) -> None: - """List domains filtered by vocabulary.""" - result = integration_client.domains.list( - 
vocabulary_ids=["SNOMED"], - include_concept_counts=True, - ) + def test_list_domains_with_stats(self, integration_client: OMOPHub) -> None: + """List domains with statistics.""" + result = integration_client.domains.list(include_stats=True) domains = result.get("domains", []) - assert isinstance(domains, list) assert len(domains) > 0 + # Verify domains are returned with expected structure including stats + for domain in domains: + assert "domain_id" in domain + assert "concept_count" in domain + assert "standard_concept_count" in domain def test_get_domain_concepts(self, integration_client: OMOPHub) -> None: """Get concepts in Condition domain.""" diff --git a/tests/integration/test_fhir.py b/tests/integration/test_fhir.py new file mode 100644 index 0000000..100d0e6 --- /dev/null +++ b/tests/integration/test_fhir.py @@ -0,0 +1,84 @@ +"""Integration tests for the FHIR resolver resource.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +if TYPE_CHECKING: + from omophub import OMOPHub + + +@pytest.mark.integration +class TestFhirIntegration: + """Integration tests for FHIR resolver against the production API.""" + + def test_resolve_snomed_live(self, integration_client: OMOPHub) -> None: + """Resolve SNOMED 44054006 (Type 2 diabetes) to OMOP concept.""" + result = integration_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + resource_type="Condition", + ) + + res = result["resolution"] + assert res["mapping_type"] == "direct" + assert res["target_table"] == "condition_occurrence" + assert res["standard_concept"]["vocabulary_id"] == "SNOMED" + assert res["standard_concept"]["standard_concept"] == "S" + assert res["domain_resource_alignment"] == "aligned" + + def test_resolve_icd10cm_live(self, integration_client: OMOPHub) -> None: + """Resolve ICD-10-CM E11.9 — non-standard, should traverse Maps to.""" + result = integration_client.fhir.resolve( + system="http://hl7.org/fhir/sid/icd-10-cm", + 
code="E11.9", + ) + + res = result["resolution"] + assert res["vocabulary_id"] == "ICD10CM" + assert res["source_concept"]["vocabulary_id"] == "ICD10CM" + assert res["standard_concept"]["standard_concept"] == "S" + assert res["target_table"] == "condition_occurrence" + + def test_resolve_batch_live(self, integration_client: OMOPHub) -> None: + """Batch resolve 3 mixed codings.""" + result = integration_client.fhir.resolve_batch( + [ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://loinc.org", "code": "2339-0"}, + {"system": "http://www.nlm.nih.gov/research/umls/rxnorm", "code": "197696"}, + ] + ) + + assert result["summary"]["total"] == 3 + assert result["summary"]["resolved"] + result["summary"]["failed"] == 3 + assert len(result["results"]) == 3 + + def test_resolve_codeable_concept_live(self, integration_client: OMOPHub) -> None: + """CodeableConcept: SNOMED should win over ICD-10-CM.""" + result = integration_client.fhir.resolve_codeable_concept( + coding=[ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + ], + resource_type="Condition", + ) + + assert result["best_match"] is not None + best = result["best_match"]["resolution"] + assert best["source_concept"]["vocabulary_id"] == "SNOMED" + assert best["target_table"] == "condition_occurrence" + + def test_resolve_with_quality_live(self, integration_client: OMOPHub) -> None: + """Mapping quality signal is returned when requested.""" + result = integration_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + include_quality=True, + ) + + res = result["resolution"] + assert "mapping_quality" in res + assert res["mapping_quality"] in ("high", "medium", "low", "manual_review") diff --git a/tests/integration/test_hierarchy.py b/tests/integration/test_hierarchy.py index acc015a..ceb449b 100644 --- a/tests/integration/test_hierarchy.py +++ b/tests/integration/test_hierarchy.py @@ -37,7 
+37,7 @@ def test_get_ancestors_with_options(self, integration_client: OMOPHub) -> None: """Get ancestors with various options.""" result = integration_client.hierarchy.ancestors( DIABETES_CONCEPT_ID, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=5, include_distance=True, page_size=50, @@ -46,6 +46,11 @@ def test_get_ancestors_with_options(self, integration_client: OMOPHub) -> None: ancestors = result.get("ancestors", result) assert isinstance(ancestors, list) + # Verify hierarchy_summary structure if present + if "hierarchy_summary" in result: + summary = result["hierarchy_summary"] + assert "max_hierarchy_depth" in summary or "total_ancestors" in summary + def test_get_descendants(self, integration_client: OMOPHub) -> None: """Get descendants of a parent concept.""" # 201820 is Diabetes mellitus (parent of Type 2) @@ -61,12 +66,17 @@ def test_get_descendants_with_filters(self, integration_client: OMOPHub) -> None """Get descendants with domain and vocabulary filters.""" result = integration_client.hierarchy.descendants( 201820, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=3, include_distance=True, - standard_only=True, + include_invalid=False, page_size=100, ) descendants = result.get("descendants", result) assert isinstance(descendants, list) + + # Verify hierarchy_summary structure if present + if "hierarchy_summary" in result: + summary = result["hierarchy_summary"] + assert "max_hierarchy_depth" in summary or "total_descendants" in summary diff --git a/tests/integration/test_mappings.py b/tests/integration/test_mappings.py index 22b0ee1..96e8141 100644 --- a/tests/integration/test_mappings.py +++ b/tests/integration/test_mappings.py @@ -27,20 +27,17 @@ def test_get_mappings_to_icd10(self, integration_client: OMOPHub) -> None: """Get ICD-10 mappings for SNOMED concept.""" result = integration_client.mappings.get( DIABETES_CONCEPT_ID, - target_vocabularies=["ICD10CM"], - direction="outgoing", + 
target_vocabulary="ICD10CM", ) mappings = extract_data(result, "mappings") assert isinstance(mappings, list) def test_get_mappings_with_options(self, integration_client: OMOPHub) -> None: - """Get mappings with quality and context options.""" + """Get mappings with include_invalid option.""" result = integration_client.mappings.get( DIABETES_CONCEPT_ID, - include_mapping_quality=True, - include_context=True, - page_size=50, + include_invalid=True, ) mappings = extract_data(result, "mappings") diff --git a/tests/integration/test_relationships.py b/tests/integration/test_relationships.py index c89d1dc..cd50d65 100644 --- a/tests/integration/test_relationships.py +++ b/tests/integration/test_relationships.py @@ -29,7 +29,7 @@ def test_get_relationships_with_type_filter( """Get relationships filtered by type.""" result = integration_client.relationships.get( DIABETES_CONCEPT_ID, - relationship_type="Is a", + relationship_ids=["Is a"], page_size=50, ) @@ -42,7 +42,7 @@ def test_get_relationships_with_vocabulary_filter( """Get relationships filtered by target vocabulary.""" result = integration_client.relationships.get( DIABETES_CONCEPT_ID, - target_vocabulary="SNOMED", + vocabulary_ids=["SNOMED"], page_size=100, ) @@ -60,25 +60,17 @@ def test_get_relationship_types(self, integration_client: OMOPHub) -> None: def test_get_relationship_types_with_filters( self, integration_client: OMOPHub ) -> None: - """Get relationship types with filters.""" + """Get relationship types with pagination.""" result = integration_client.relationships.types( - vocabulary_ids=["SNOMED"], - include_reverse=True, - include_usage_stats=True, page_size=50, ) types = result.get("relationship_types", result) assert isinstance(types, list) - def test_get_relationship_types_by_category( - self, integration_client: OMOPHub - ) -> None: - """Get relationship types by category.""" - result = integration_client.relationships.types( - category="hierarchy", - standard_only=True, - ) + def 
test_get_relationship_types_basic(self, integration_client: OMOPHub) -> None: + """Get relationship types with default settings.""" + result = integration_client.relationships.types() types = result.get("relationship_types", result) assert isinstance(types, list) diff --git a/tests/integration/test_search.py b/tests/integration/test_search.py index 4ff9643..df4b311 100644 --- a/tests/integration/test_search.py +++ b/tests/integration/test_search.py @@ -49,11 +49,59 @@ def test_search_with_domain_filter(self, integration_client: OMOPHub) -> None: for concept in concepts: assert concept.get("domain_id") == "Drug" + def test_search_with_standard_concept_filter( + self, integration_client: OMOPHub + ) -> None: + """Search for standard concepts only.""" + results = integration_client.search.basic( + "diabetes", + standard_concept="S", + page_size=20, + ) + + concepts = extract_data(results, "concepts") + assert len(concepts) > 0 + # All results should be standard concepts + for concept in concepts: + assert concept.get("standard_concept") == "S" + + def test_search_with_multiple_filters_and_pagination( + self, integration_client: OMOPHub + ) -> None: + """Search with multiple filters to test COUNT query parameter binding. + + This test specifically catches COUNT query bugs where parameter binding + differs between the main query and the count query. If the COUNT query + fails (like with missing standard_concept parameter), the API would + return a 500 error instead of results. 
+ """ + results = integration_client.search.basic( + "diabetes", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + standard_concept="S", + page_size=10, + ) + + # Extract concepts - SDK may return list directly or wrapped in dict + concepts = extract_data(results, "concepts") + assert len(concepts) > 0, "Expected concepts but got empty result" + + # Verify all filters applied correctly + for concept in concepts: + assert concept.get("vocabulary_id") == "SNOMED" + assert concept.get("domain_id") == "Condition" + assert concept.get("standard_concept") == "S" + + # Note: The SDK extracts concepts from the response, so we verify + # the COUNT query worked by the fact that we got results without + # an error (COUNT query failure would cause HTTP 500) + def test_autocomplete(self, integration_client: OMOPHub) -> None: """Test autocomplete suggestions.""" result = integration_client.search.autocomplete( "diab", - max_suggestions=10, + page_size=10, ) suggestions = extract_data(result, "suggestions") @@ -76,25 +124,274 @@ def test_autocomplete_with_filters(self, integration_client: OMOPHub) -> None: "hyper", vocabulary_ids=["SNOMED"], domains=["Condition"], - max_suggestions=5, + page_size=5, ) suggestions = extract_data(result, "suggestions") assert isinstance(suggestions, list) def test_basic_iter_pagination(self, integration_client: OMOPHub) -> None: - """Test auto-pagination with basic_iter.""" - # Collect first 5 concepts using iterator + """Test auto-pagination with basic_iter. + + This test verifies that basic_iter correctly fetches multiple pages + of results. With page_size=2, we should be able to collect 5 concepts + which requires fetching at least 3 pages, proving pagination works. 
+ """ + # Collect concepts using iterator with small page size concepts = [] + page_size = 2 + max_concepts = 5 + for concept in integration_client.search.basic_iter( "diabetes", - page_size=2, # Small page size to test pagination + page_size=page_size, # Small page size to test pagination ): concepts.append(concept) - if len(concepts) >= 5: + if len(concepts) >= max_concepts: break - assert len(concepts) == 5 + # Should get requested number of concepts (proves pagination worked) + assert len(concepts) == max_concepts, ( + f"Expected {max_concepts} concepts from paginated iterator, " + f"got {len(concepts)}. With page_size={page_size}, getting only " + f"{len(concepts)} concepts suggests pagination is broken." + ) + # All should have concept_id for concept in concepts: assert "concept_id" in concept + + +@pytest.mark.integration +class TestSemanticSearchIntegration: + """Integration tests for semantic search endpoints.""" + + def test_semantic_search_basic(self, integration_client: OMOPHub) -> None: + """Test basic semantic search returns results with similarity scores.""" + result = integration_client.search.semantic( + "myocardial infarction", page_size=5 + ) + + # SDK may return data wrapped in 'results' key or as a list + results = extract_data(result, "results") + + # Should have results + assert isinstance(results, list) + if len(results) > 0: + # Each result should have similarity score + for r in results: + assert "similarity_score" in r or "score" in r + assert "concept_id" in r + + def test_semantic_search_with_filters(self, integration_client: OMOPHub) -> None: + """Test semantic search with vocabulary/domain filters.""" + result = integration_client.search.semantic( + "diabetes", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + threshold=0.5, + page_size=10, + ) + + results = extract_data(result, "results") + + # If results exist, verify filters applied + if len(results) > 0: + for r in results: + assert r.get("vocabulary_id") == "SNOMED" + 
assert r.get("domain_id") == "Condition" + + def test_semantic_search_with_threshold(self, integration_client: OMOPHub) -> None: + """Test that higher threshold returns fewer or equal results.""" + result_low = integration_client.search.semantic( + "heart attack", threshold=0.3, page_size=20 + ) + result_high = integration_client.search.semantic( + "heart attack", threshold=0.8, page_size=20 + ) + + results_low = extract_data(result_low, "results") + results_high = extract_data(result_high, "results") + + # Guard: skip test if no results to compare + if not results_low: + pytest.skip( + "No results returned for low threshold - cannot test threshold comparison" + ) + + # Higher threshold should return fewer or equal results + assert len(results_high) <= len(results_low) + + def test_semantic_iter_pagination(self, integration_client: OMOPHub) -> None: + """Test auto-pagination via semantic_iter.""" + import itertools + + results = list( + itertools.islice( + integration_client.search.semantic_iter("diabetes", page_size=5), 10 + ) + ) + + # Should get up to 10 results across multiple pages + assert len(results) > 0 + # Each result should have required fields + for r in results: + assert "concept_id" in r + + def test_similar_by_concept_id(self, integration_client: OMOPHub) -> None: + """Test finding similar concepts by ID.""" + from tests.conftest import MI_CONCEPT_ID + + result = integration_client.search.similar( + concept_id=MI_CONCEPT_ID, page_size=5 + ) + + # Should have similar_concepts key or be a list + similar = extract_data(result, "similar_concepts") + + assert isinstance(similar, list) + # If results exist, verify structure + if len(similar) > 0: + for concept in similar: + assert "concept_id" in concept + + def test_similar_by_query(self, integration_client: OMOPHub) -> None: + """Test finding similar concepts by natural language query.""" + result = integration_client.search.similar( + query="elevated blood glucose", page_size=5 + ) + + similar = 
extract_data(result, "similar_concepts") + + assert isinstance(similar, list) + + def test_similar_with_algorithm(self, integration_client: OMOPHub) -> None: + """Test similar search with different algorithms.""" + from tests.conftest import MI_CONCEPT_ID + + result = integration_client.search.similar( + concept_id=MI_CONCEPT_ID, + algorithm="semantic", + similarity_threshold=0.6, + page_size=5, + ) + + # Verify response structure + assert isinstance(result, dict) + # May have search_metadata with algorithm info + metadata = result.get("search_metadata", {}) + if metadata: + assert metadata.get("algorithm_used") in ["semantic", "hybrid", "lexical"] + + def test_similar_with_vocabulary_filter(self, integration_client: OMOPHub) -> None: + """Test similar search filtered by vocabulary.""" + from tests.conftest import DIABETES_CONCEPT_ID + + result = integration_client.search.similar( + concept_id=DIABETES_CONCEPT_ID, + vocabulary_ids=["SNOMED"], + page_size=10, + ) + + similar = extract_data(result, "similar_concepts") + + # If results, all should be from SNOMED + for concept in similar: + assert concept.get("vocabulary_id") == "SNOMED" + + +@pytest.mark.integration +class TestBulkBasicSearchIntegration: + """Integration tests for bulk lexical search.""" + + def test_bulk_basic_multiple_queries(self, integration_client: OMOPHub) -> None: + """Test bulk basic search with multiple queries.""" + result = integration_client.search.bulk_basic([ + {"search_id": "q1", "query": "diabetes mellitus"}, + {"search_id": "q2", "query": "hypertension"}, + {"search_id": "q3", "query": "aspirin"}, + ], defaults={"page_size": 5}) + + results = extract_data(result, "results") + assert len(results) == 3 + + # Verify all 3 search IDs are present with results + returned_ids = {item["search_id"] for item in results} + assert returned_ids == {"q1", "q2", "q3"} + for item in results: + assert item["status"] == "completed" + assert len(item["results"]) > 0 + + def 
test_bulk_basic_with_vocabulary_filter(self, integration_client: OMOPHub) -> None: + """Test bulk basic search with shared vocabulary filter.""" + result = integration_client.search.bulk_basic([ + {"search_id": "snomed1", "query": "diabetes"}, + {"search_id": "snomed2", "query": "myocardial infarction"}, + ], defaults={"vocabulary_ids": ["SNOMED"], "page_size": 3}) + + results = extract_data(result, "results") + for item in results: + assert item["status"] == "completed" + # Verify SNOMED filter applied + for concept in item["results"]: + assert concept.get("vocabulary_id") == "SNOMED" + + def test_bulk_basic_single_query(self, integration_client: OMOPHub) -> None: + """Test bulk basic search with a single query.""" + result = integration_client.search.bulk_basic([ + {"search_id": "single", "query": "metformin", "page_size": 3}, + ]) + + results = extract_data(result, "results") + assert len(results) == 1 + assert results[0]["search_id"] == "single" + assert results[0]["status"] == "completed" + + +@pytest.mark.integration +class TestBulkSemanticSearchIntegration: + """Integration tests for bulk semantic search.""" + + def test_bulk_semantic_multiple_queries(self, integration_client: OMOPHub) -> None: + """Test bulk semantic search with multiple natural-language queries.""" + result = integration_client.search.bulk_semantic([ + {"search_id": "s1", "query": "heart failure treatment options"}, + {"search_id": "s2", "query": "type 2 diabetes medication"}, + ], defaults={"threshold": 0.5, "page_size": 5}) + + results = extract_data(result, "results") + assert len(results) == 2 + + for item in results: + assert item["search_id"] in ("s1", "s2") + assert item["status"] == "completed" + + def test_bulk_semantic_with_filters(self, integration_client: OMOPHub) -> None: + """Test bulk semantic search with vocabulary and domain filters.""" + result = integration_client.search.bulk_semantic([ + { + "search_id": "filtered", + "query": "pain relief medication", + 
"vocabulary_ids": ["SNOMED"], + "page_size": 3, + "threshold": 0.5, + }, + ]) + + results = extract_data(result, "results") + assert len(results) == 1 + assert results[0]["status"] == "completed" + + # Verify SNOMED vocabulary filter was applied + for concept in results[0]["results"]: + assert concept.get("vocabulary_id") == "SNOMED" + + def test_bulk_semantic_single_query(self, integration_client: OMOPHub) -> None: + """Test bulk semantic search with a single query.""" + result = integration_client.search.bulk_semantic([ + {"search_id": "one", "query": "elevated blood pressure", "threshold": 0.5}, + ]) + + results = extract_data(result, "results") + assert len(results) == 1 + assert results[0]["search_id"] == "one" diff --git a/tests/integration/test_vocabularies.py b/tests/integration/test_vocabularies.py index f9915ea..ed43077 100644 --- a/tests/integration/test_vocabularies.py +++ b/tests/integration/test_vocabularies.py @@ -48,13 +48,9 @@ def test_get_vocabulary(self, integration_client: OMOPHub) -> None: assert vocab["vocabulary_id"] == "SNOMED" assert "vocabulary_name" in vocab - def test_get_vocabulary_with_options(self, integration_client: OMOPHub) -> None: - """Get vocabulary with stats and domains.""" - vocab = integration_client.vocabularies.get( - "SNOMED", - include_stats=True, - include_domains=True, - ) + def test_get_vocabulary_basic(self, integration_client: OMOPHub) -> None: + """Get vocabulary (use stats() method for statistics).""" + vocab = integration_client.vocabularies.get("SNOMED") assert vocab["vocabulary_id"] == "SNOMED" assert "vocabulary_name" in vocab @@ -72,18 +68,22 @@ def test_get_vocabulary_stats(self, integration_client: OMOPHub) -> None: ) def test_get_vocabulary_domains(self, integration_client: OMOPHub) -> None: - """Get vocabulary domains.""" - result = integration_client.vocabularies.domains(vocabulary_ids=["SNOMED"]) + """Get all OHDSI domains.""" + result = integration_client.vocabularies.domains() domains = 
extract_data(result, "domains") assert isinstance(domains, list) + assert len(domains) > 0 + # Verify domain structure + assert all("domain_id" in d for d in domains) + assert all("domain_name" in d for d in domains) def test_get_vocabulary_concepts(self, integration_client: OMOPHub) -> None: """Get concepts in SNOMED vocabulary.""" result = integration_client.vocabularies.concepts( "SNOMED", - domain_id="Condition", - standard_only=True, + search="diabetes", + standard_concept="S", page_size=10, ) diff --git a/tests/unit/resources/test_concepts.py b/tests/unit/resources/test_concepts.py index 2168a1e..4023e55 100644 --- a/tests/unit/resources/test_concepts.py +++ b/tests/unit/resources/test_concepts.py @@ -87,9 +87,7 @@ def test_batch_concepts_with_options( ) -> None: """Test batch concepts with all options.""" route = respx.post(f"{base_url}/concepts/batch").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) sync_client.concepts.batch( @@ -137,15 +135,15 @@ def test_suggest_concepts_with_filters( sync_client.concepts.suggest( "diabetes", - vocabulary="SNOMED", - domain="Condition", - limit=20, + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + page_size=20, ) url_str = str(route.calls[0].request.url) - assert "vocabulary=SNOMED" in url_str - assert "domain=Condition" in url_str - assert "limit=20" in url_str + assert "vocabulary_ids=SNOMED" in url_str + assert "domain_ids=Condition" in url_str + assert "page_size=20" in url_str @respx.mock def test_get_related_concepts(self, sync_client: OMOPHub, base_url: str) -> None: @@ -179,23 +177,15 @@ def test_get_related_with_options( sync_client.concepts.related( 201826, - relatedness_types=["hierarchical", "semantic"], - vocabulary_ids=["SNOMED"], - domain_ids=["Condition"], - min_relatedness_score=0.5, - max_results=100, - include_scores=False, - standard_concepts_only=True, + 
relationship_types=["Is a", "Maps to"], + min_score=0.5, + page_size=100, ) url_str = str(route.calls[0].request.url) - assert "relatedness_types=hierarchical%2Csemantic" in url_str - assert "vocabulary_ids=SNOMED" in url_str - assert "domain_ids=Condition" in url_str - assert "min_relatedness_score=0.5" in url_str - assert "max_results=100" in url_str - assert "include_scores=false" in url_str - assert "standard_concepts_only=true" in url_str + assert "relationship_types=Is+a%2CMaps+to" in url_str + assert "min_score=0.5" in url_str + assert "page_size=100" in url_str @respx.mock def test_get_concept_relationships( @@ -230,20 +220,173 @@ def test_get_concept_relationships_with_options( sync_client.concepts.relationships( 201826, - relationship_type="Is a", - target_vocabulary="SNOMED", + relationship_ids="Is a", + vocabulary_ids="SNOMED", include_invalid=True, - page=2, - page_size=50, ) url_str = str(route.calls[0].request.url) # All params use snake_case to match API standards - assert "relationship_type=Is+a" in url_str - assert "target_vocabulary=SNOMED" in url_str + assert "relationship_ids=Is+a" in url_str + assert "vocabulary_ids=SNOMED" in url_str + assert "include_invalid=true" in url_str + + @respx.mock + def test_get_concept_relationships_with_all_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test relationships with all filter options including standard_only and include_reverse.""" + route = respx.get(f"{base_url}/concepts/201826/relationships").mock( + return_value=Response( + 200, json={"success": True, "data": {"relationships": []}} + ) + ) + + sync_client.concepts.relationships( + 201826, + relationship_ids=["Is a", "Maps to"], + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition", "Drug"], + include_invalid=True, + standard_only=True, + include_reverse=True, + vocab_release="2025.1", + ) + + url_str = str(route.calls[0].request.url) + assert "relationship_ids=Is+a%2CMaps+to" in url_str + assert 
"vocabulary_ids=SNOMED%2CICD10CM" in url_str + assert "domain_ids=Condition%2CDrug" in url_str assert "include_invalid=true" in url_str - assert "page=2" in url_str - assert "page_size=50" in url_str + assert "standard_only=true" in url_str + assert "include_reverse=true" in url_str + assert "vocab_release=2025.1" in url_str + + @respx.mock + def test_recommended_concepts(self, sync_client: OMOPHub, base_url: str) -> None: + """Test getting recommended concepts.""" + recommended_response = { + "success": True, + "data": { + "recommendations": [ + {"concept_id": 201820, "score": 0.95}, + ], + "meta": {"total": 1}, + }, + } + respx.post(f"{base_url}/concepts/recommended").mock( + return_value=Response(200, json=recommended_response) + ) + + result = sync_client.concepts.recommended([201826]) + assert "recommendations" in result + + @respx.mock + def test_recommended_concepts_with_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test recommended concepts with all options.""" + route = respx.post(f"{base_url}/concepts/recommended").mock( + return_value=Response( + 200, json={"success": True, "data": {"recommendations": []}} + ) + ) + + sync_client.concepts.recommended( + [201826, 1112807], + relationship_types=["Is a", "Maps to"], + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + standard_only=False, + include_invalid=True, + page=2, + page_size=50, + ) + + # Verify POST body was sent + assert route.calls[0].request.content + + @respx.mock + def test_get_concept_with_hierarchy( + self, sync_client: OMOPHub, mock_api_response: dict, base_url: str + ) -> None: + """Test getting a concept with hierarchy option.""" + route = respx.get(f"{base_url}/concepts/201826").mock( + return_value=Response(200, json=mock_api_response) + ) + + sync_client.concepts.get(201826, include_hierarchy=True) + + url_str = str(route.calls[0].request.url) + assert "include_hierarchy=true" in url_str + + @respx.mock + def test_get_concept_with_vocab_release( + self, 
sync_client: OMOPHub, mock_api_response: dict, base_url: str + ) -> None: + """Test getting a concept with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/201826").mock( + return_value=Response(200, json=mock_api_response) + ) + + sync_client.concepts.get(201826, vocab_release="2025.1") + + url_str = str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str + + @respx.mock + def test_get_by_code_with_all_options( + self, sync_client: OMOPHub, mock_api_response: dict, base_url: str + ) -> None: + """Test getting concept by code with all options.""" + route = respx.get(f"{base_url}/concepts/by-code/SNOMED/44054006").mock( + return_value=Response(200, json=mock_api_response) + ) + + sync_client.concepts.get_by_code( + "SNOMED", + "44054006", + include_relationships=True, + include_synonyms=True, + include_hierarchy=True, + vocab_release="2025.1", + ) + + url_str = str(route.calls[0].request.url) + assert "include_relationships=true" in url_str + assert "include_synonyms=true" in url_str + assert "include_hierarchy=true" in url_str + assert "vocab_release=2025.1" in url_str + + @respx.mock + def test_suggest_concepts_with_vocab_release( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test suggest with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/suggest").mock( + return_value=Response(200, json={"success": True, "data": []}) + ) + + sync_client.concepts.suggest("diabetes", vocab_release="2025.1") + + url_str = str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str + + @respx.mock + def test_related_concepts_with_vocab_release( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test related concepts with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/201826/related").mock( + return_value=Response( + 200, json={"success": True, "data": {"related_concepts": []}} + ) + ) + + sync_client.concepts.related(201826, vocab_release="2025.1") + + url_str = 
str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str class TestAsyncConceptsResource: @@ -324,9 +467,7 @@ async def test_async_batch_concepts_with_options( ) -> None: """Test async batch with options.""" route = respx.post(f"{base_url}/concepts/batch").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) await async_client.concepts.batch( @@ -369,15 +510,15 @@ async def test_async_suggest_with_filters( await async_client.concepts.suggest( "aspirin", - vocabulary="SNOMED", - domain="Drug", - limit=5, + vocabulary_ids=["SNOMED"], + domain_ids=["Drug"], + page_size=5, ) url_str = str(route.calls[0].request.url) - assert "vocabulary=SNOMED" in url_str - assert "domain=Drug" in url_str - assert "limit=5" in url_str + assert "vocabulary_ids=SNOMED" in url_str + assert "domain_ids=Drug" in url_str + assert "page_size=5" in url_str @pytest.mark.asyncio @respx.mock @@ -408,20 +549,15 @@ async def test_async_get_related_with_options( await async_client.concepts.related( 201826, - relatedness_types=["semantic"], - vocabulary_ids=["SNOMED"], - domain_ids=["Condition"], - min_relatedness_score=0.7, - max_results=25, - include_scores=True, - standard_concepts_only=True, + relationship_types=["Is a"], + min_score=0.7, + page_size=25, ) url_str = str(route.calls[0].request.url) - assert "relatedness_types=semantic" in url_str - assert "vocabulary_ids=SNOMED" in url_str - assert "min_relatedness_score=0.7" in url_str - assert "standard_concepts_only=true" in url_str + assert "relationship_types=Is+a" in url_str + assert "min_score=0.7" in url_str + assert "page_size=25" in url_str @pytest.mark.asyncio @respx.mock @@ -452,14 +588,178 @@ async def test_async_get_relationships_with_options( await async_client.concepts.relationships( 201826, - relationship_type="Maps to", - target_vocabulary="ICD10CM", + relationship_ids="Maps to", + 
vocabulary_ids="ICD10CM", + include_invalid=True, + ) + + url_str = str(route.calls[0].request.url) + assert "relationship_ids=Maps+to" in url_str + assert "vocabulary_ids=ICD10CM" in url_str + assert "include_invalid=true" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_get_relationships_with_all_options( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async relationships with all options.""" + route = respx.get(f"{base_url}/concepts/201826/relationships").mock( + return_value=Response( + 200, json={"success": True, "data": {"relationships": []}} + ) + ) + + await async_client.concepts.relationships( + 201826, + relationship_ids=["Is a"], + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + include_invalid=True, + standard_only=True, + include_reverse=True, + vocab_release="2025.1", + ) + + url_str = str(route.calls[0].request.url) + assert "domain_ids=Condition" in url_str + assert "standard_only=true" in url_str + assert "include_reverse=true" in url_str + assert "vocab_release=2025.1" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_recommended_concepts( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async getting recommended concepts.""" + recommended_response = { + "success": True, + "data": { + "recommendations": [{"concept_id": 201820}], + }, + } + respx.post(f"{base_url}/concepts/recommended").mock( + return_value=Response(200, json=recommended_response) + ) + + result = await async_client.concepts.recommended([201826]) + assert "recommendations" in result + + @pytest.mark.asyncio + @respx.mock + async def test_async_recommended_concepts_with_options( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async recommended with all options.""" + route = respx.post(f"{base_url}/concepts/recommended").mock( + return_value=Response( + 200, json={"success": True, "data": {"recommendations": []}} + ) + ) + + await 
async_client.concepts.recommended( + [201826, 1112807], + relationship_types=["Is a"], + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + standard_only=False, include_invalid=True, page=1, page_size=100, ) + assert route.calls[0].request.content + + @pytest.mark.asyncio + @respx.mock + async def test_async_get_concept_with_hierarchy( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async get concept with hierarchy option.""" + route = respx.get(f"{base_url}/concepts/201826").mock( + return_value=Response( + 200, json={"success": True, "data": {"concept_id": 201826}} + ) + ) + + await async_client.concepts.get(201826, include_hierarchy=True) + url_str = str(route.calls[0].request.url) - assert "relationship_type=Maps+to" in url_str - assert "target_vocabulary=ICD10CM" in url_str - assert "include_invalid=true" in url_str + assert "include_hierarchy=true" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_get_concept_with_vocab_release( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async get concept with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/201826").mock( + return_value=Response( + 200, json={"success": True, "data": {"concept_id": 201826}} + ) + ) + + await async_client.concepts.get(201826, vocab_release="2025.1") + + url_str = str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_get_by_code_with_all_options( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async get by code with all options.""" + route = respx.get(f"{base_url}/concepts/by-code/SNOMED/44054006").mock( + return_value=Response( + 200, json={"success": True, "data": {"concept_id": 201826}} + ) + ) + + await async_client.concepts.get_by_code( + "SNOMED", + "44054006", + include_relationships=True, + include_synonyms=True, + include_hierarchy=True, + 
vocab_release="2025.1", + ) + + url_str = str(route.calls[0].request.url) + assert "include_relationships=true" in url_str + assert "include_synonyms=true" in url_str + assert "include_hierarchy=true" in url_str + assert "vocab_release=2025.1" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_suggest_with_vocab_release( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async suggest with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/suggest").mock( + return_value=Response(200, json={"success": True, "data": []}) + ) + + await async_client.concepts.suggest("diabetes", vocab_release="2025.1") + + url_str = str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_related_with_vocab_release( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async related with vocab_release option.""" + route = respx.get(f"{base_url}/concepts/201826/related").mock( + return_value=Response( + 200, json={"success": True, "data": {"related_concepts": []}} + ) + ) + + await async_client.concepts.related(201826, vocab_release="2025.1") + + url_str = str(route.calls[0].request.url) + assert "vocab_release=2025.1" in url_str diff --git a/tests/unit/resources/test_domains.py b/tests/unit/resources/test_domains.py index 35035b5..38492cc 100644 --- a/tests/unit/resources/test_domains.py +++ b/tests/unit/resources/test_domains.py @@ -27,7 +27,6 @@ def test_list_domains(self, sync_client: OMOPHub, base_url: str) -> None: {"domain_id": "Drug", "domain_name": "Drug"}, {"domain_id": "Procedure", "domain_name": "Procedure"}, ], - "summary": {"total_domains": 3}, }, } respx.get(f"{base_url}/domains").mock( @@ -38,36 +37,47 @@ def test_list_domains(self, sync_client: OMOPHub, base_url: str) -> None: assert "domains" in result @respx.mock - def test_list_domains_with_options( - self, sync_client: OMOPHub, base_url: str - ) 
-> None: - """Test listing domains with all filter options.""" + def test_list_domains_with_stats(self, sync_client: OMOPHub, base_url: str) -> None: + """Test listing domains with include_stats option.""" route = respx.get(f"{base_url}/domains").mock( return_value=Response( - 200, json={"success": True, "data": {"domains": []}} + 200, + json={ + "success": True, + "data": { + "domains": [ + { + "domain_id": "Condition", + "domain_name": "Condition", + "concept_count": 845672, + "standard_concept_count": 423891, + "vocabulary_coverage": ["SNOMED", "ICD10CM"], + } + ] + }, + }, ) ) - sync_client.domains.list( - vocabulary_ids=["SNOMED", "ICD10CM"], - include_concept_counts=True, - include_statistics=True, - include_examples=True, - standard_only=True, - active_only=False, - sort_by="concept_count", - sort_order="desc", + sync_client.domains.list(include_stats=True) + + url_str = str(route.calls[0].request.url) + assert "include_stats=true" in url_str + + @respx.mock + def test_list_domains_without_stats( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test listing domains without stats (default).""" + route = respx.get(f"{base_url}/domains").mock( + return_value=Response(200, json={"success": True, "data": {"domains": []}}) ) + sync_client.domains.list() + url_str = str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str - assert "include_concept_counts=true" in url_str - assert "include_statistics=true" in url_str - assert "include_examples=true" in url_str - assert "standard_only=true" in url_str - assert "active_only=false" in url_str - assert "sort_by=concept_count" in url_str - assert "sort_order=desc" in url_str + # Should not include include_stats when False (default) + assert "include_stats" not in url_str @respx.mock def test_get_domain_concepts(self, sync_client: OMOPHub, base_url: str) -> None: @@ -89,8 +99,8 @@ def test_get_domain_concepts(self, sync_client: OMOPHub, base_url: str) -> None: result = 
sync_client.domains.concepts( "Condition", vocabulary_ids=["SNOMED"], - concept_class_ids=["Clinical Finding"], standard_only=True, + include_invalid=True, page=1, page_size=100, ) @@ -98,8 +108,8 @@ def test_get_domain_concepts(self, sync_client: OMOPHub, base_url: str) -> None: assert "concepts" in result url_str = str(route.calls[0].request.url) assert "vocabulary_ids=SNOMED" in url_str - assert "concept_class_ids=Clinical+Finding" in url_str assert "standard_only=true" in url_str + assert "include_invalid=true" in url_str assert "page=1" in url_str assert "page_size=100" in url_str @@ -114,9 +124,7 @@ async def test_async_list_domains( ) -> None: """Test async listing domains.""" respx.get(f"{base_url}/domains").mock( - return_value=Response( - 200, json={"success": True, "data": {"domains": []}} - ) + return_value=Response(200, json={"success": True, "data": {"domains": []}}) ) result = await async_client.domains.list() @@ -124,36 +132,18 @@ async def test_async_list_domains( @pytest.mark.asyncio @respx.mock - async def test_async_list_domains_with_options( + async def test_async_list_domains_with_stats( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: - """Test async listing domains with all options.""" + """Test async listing domains with include_stats.""" route = respx.get(f"{base_url}/domains").mock( - return_value=Response( - 200, json={"success": True, "data": {"domains": []}} - ) + return_value=Response(200, json={"success": True, "data": {"domains": []}}) ) - await async_client.domains.list( - vocabulary_ids=["SNOMED"], - include_concept_counts=True, - include_statistics=True, - include_examples=True, - standard_only=True, - active_only=False, - sort_by="domain_id", - sort_order="asc", - ) + await async_client.domains.list(include_stats=True) url_str = str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED" in url_str - assert "include_concept_counts=true" in url_str - assert "include_statistics=true" in url_str - assert 
"include_examples=true" in url_str - assert "active_only=false" in url_str - assert "standard_only=true" in url_str - assert "sort_by=domain_id" in url_str - assert "sort_order=asc" in url_str + assert "include_stats=true" in url_str @pytest.mark.asyncio @respx.mock @@ -162,23 +152,21 @@ async def test_async_get_domain_concepts( ) -> None: """Test async getting domain concepts.""" route = respx.get(f"{base_url}/domains/Condition/concepts").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) await async_client.domains.concepts( "Condition", vocabulary_ids=["SNOMED", "ICD10CM"], - concept_class_ids=["Clinical Finding", "Disease"], standard_only=True, + include_invalid=True, page=2, page_size=25, ) url_str = str(route.calls[0].request.url) assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str - assert "concept_class_ids=Clinical+Finding%2CDisease" in url_str assert "standard_only=true" in url_str + assert "include_invalid=true" in url_str assert "page=2" in url_str assert "page_size=25" in url_str diff --git a/tests/unit/resources/test_fhir.py b/tests/unit/resources/test_fhir.py new file mode 100644 index 0000000..17aa2dc --- /dev/null +++ b/tests/unit/resources/test_fhir.py @@ -0,0 +1,575 @@ +"""Tests for the FHIR resolver resource.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import respx +from httpx import Response + +if TYPE_CHECKING: + from omophub import AsyncOMOPHub, OMOPHub + + +# -- Fixtures ---------------------------------------------------------------- + +SNOMED_RESOLVE_RESPONSE = { + "success": True, + "data": { + "input": { + "system": "http://snomed.info/sct", + "code": "44054006", + "resource_type": "Condition", + }, + "resolution": { + "vocabulary_id": "SNOMED", + "source_concept": { + "concept_id": 201826, + "concept_name": "Type 2 diabetes mellitus", + "concept_code": "44054006", + 
"vocabulary_id": "SNOMED", + "domain_id": "Condition", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + }, + "standard_concept": { + "concept_id": 201826, + "concept_name": "Type 2 diabetes mellitus", + "concept_code": "44054006", + "vocabulary_id": "SNOMED", + "domain_id": "Condition", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + }, + "mapping_type": "direct", + "target_table": "condition_occurrence", + "domain_resource_alignment": "aligned", + }, + }, + "meta": {"request_id": "test", "timestamp": "2026-04-10T00:00:00Z", "vocab_release": "2025.2"}, +} + +ICD10_MAPPED_RESPONSE = { + "success": True, + "data": { + "input": {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + "resolution": { + "vocabulary_id": "ICD10CM", + "source_concept": { + "concept_id": 45576876, + "concept_name": "Type 2 diabetes mellitus without complications", + "concept_code": "E11.9", + "vocabulary_id": "ICD10CM", + "domain_id": "Condition", + "concept_class_id": "5-char billing code", + "standard_concept": None, + }, + "standard_concept": { + "concept_id": 201826, + "concept_name": "Type 2 diabetes mellitus", + "concept_code": "44054006", + "vocabulary_id": "SNOMED", + "domain_id": "Condition", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + }, + "mapping_type": "mapped", + "relationship_id": "Maps to", + "target_table": "condition_occurrence", + "domain_resource_alignment": "not_checked", + "mapping_quality": "high", + }, + }, +} + +BATCH_RESPONSE = { + "success": True, + "data": { + "results": [SNOMED_RESOLVE_RESPONSE["data"]], + "summary": {"total": 1, "resolved": 1, "failed": 0}, + }, +} + +CODEABLE_CONCEPT_RESPONSE = { + "success": True, + "data": { + "input": { + "coding": [ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + ], + "resource_type": "Condition", + }, + "best_match": SNOMED_RESOLVE_RESPONSE["data"], + 
"alternatives": [ICD10_MAPPED_RESPONSE["data"]], + "unresolved": [], + }, +} + + +# -- Sync tests -------------------------------------------------------------- + + +class TestFhirSync: + """Tests for the synchronous Fhir resource.""" + + @respx.mock + def test_resolve_snomed_direct(self, sync_client: OMOPHub, base_url: str) -> None: + """SNOMED direct resolution returns correct shape.""" + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=SNOMED_RESOLVE_RESPONSE) + ) + + result = sync_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + resource_type="Condition", + ) + + assert result["resolution"]["mapping_type"] == "direct" + assert result["resolution"]["target_table"] == "condition_occurrence" + assert result["resolution"]["standard_concept"]["concept_id"] == 201826 + + @respx.mock + def test_resolve_icd10_mapped(self, sync_client: OMOPHub, base_url: str) -> None: + """ICD-10-CM maps to a standard SNOMED concept.""" + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=ICD10_MAPPED_RESPONSE) + ) + + result = sync_client.fhir.resolve( + system="http://hl7.org/fhir/sid/icd-10-cm", + code="E11.9", + include_quality=True, + ) + + assert result["resolution"]["mapping_type"] == "mapped" + assert result["resolution"]["relationship_id"] == "Maps to" + assert result["resolution"]["mapping_quality"] == "high" + + @respx.mock + def test_resolve_text_only(self, sync_client: OMOPHub, base_url: str) -> None: + """Display-only input triggers semantic search fallback.""" + semantic_response = { + "success": True, + "data": { + "input": {"display": "Blood Sugar", "resource_type": "Observation"}, + "resolution": { + "vocabulary_id": None, + "source_concept": { + "concept_id": 3004501, + "concept_name": "Glucose [Mass/volume] in Blood", + "concept_code": "2339-0", + "vocabulary_id": "LOINC", + "domain_id": "Measurement", + "concept_class_id": "Lab Test", + "standard_concept": "S", + }, + 
"standard_concept": { + "concept_id": 3004501, + "concept_name": "Glucose [Mass/volume] in Blood", + "concept_code": "2339-0", + "vocabulary_id": "LOINC", + "domain_id": "Measurement", + "concept_class_id": "Lab Test", + "standard_concept": "S", + }, + "mapping_type": "semantic_match", + "similarity_score": 0.91, + "target_table": "measurement", + "domain_resource_alignment": "aligned", + }, + }, + } + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=semantic_response) + ) + + result = sync_client.fhir.resolve(display="Blood Sugar", resource_type="Observation") + + assert result["resolution"]["mapping_type"] == "semantic_match" + assert result["resolution"]["similarity_score"] == 0.91 + + @respx.mock + def test_resolve_with_recommendations(self, sync_client: OMOPHub, base_url: str) -> None: + """Recommendations are included when requested.""" + recs_response = {**SNOMED_RESOLVE_RESPONSE} + recs_response["data"] = { + **SNOMED_RESOLVE_RESPONSE["data"], + "resolution": { + **SNOMED_RESOLVE_RESPONSE["data"]["resolution"], + "recommendations": [ + { + "concept_id": 4193704, + "concept_name": "Hyperglycemia", + "vocabulary_id": "SNOMED", + "domain_id": "Condition", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + "relationship_id": "Has finding", + } + ], + }, + } + route = respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=recs_response) + ) + + result = sync_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + include_recommendations=True, + recommendations_limit=3, + ) + + assert len(result["resolution"]["recommendations"]) == 1 + # Verify the request body included the flags + import json + + body = json.loads(route.calls[0].request.content) + assert body["include_recommendations"] is True + assert body["recommendations_limit"] == 3 + + @respx.mock + def test_resolve_unknown_system_400(self, sync_client: OMOPHub, base_url: str) -> None: + """Unknown URI raises an 
API error.""" + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response( + 400, + json={ + "success": False, + "error": { + "code": "unknown_system", + "message": "Unknown FHIR code system URI", + "details": {"suggestion": "http://snomed.info/sct"}, + }, + }, + ) + ) + + with pytest.raises(Exception): + sync_client.fhir.resolve(system="http://snomed.info/sc", code="44054006") + + @respx.mock + def test_resolve_cpt4_403(self, sync_client: OMOPHub, base_url: str) -> None: + """CPT4 raises a 403 restricted error.""" + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response( + 403, + json={ + "success": False, + "error": { + "code": "vocabulary_restricted", + "message": "CPT4 is excluded", + }, + }, + ) + ) + + with pytest.raises(Exception): + sync_client.fhir.resolve(system="http://www.ama-assn.org/go/cpt", code="99213") + + @respx.mock + def test_resolve_batch(self, sync_client: OMOPHub, base_url: str) -> None: + """Batch resolution returns results and summary.""" + respx.post(f"{base_url}/fhir/resolve/batch").mock( + return_value=Response(200, json=BATCH_RESPONSE) + ) + + result = sync_client.fhir.resolve_batch( + [{"system": "http://snomed.info/sct", "code": "44054006"}] + ) + + assert result["summary"]["total"] == 1 + assert result["summary"]["resolved"] == 1 + assert len(result["results"]) == 1 + + @respx.mock + def test_resolve_codeable_concept(self, sync_client: OMOPHub, base_url: str) -> None: + """CodeableConcept resolution returns best_match and alternatives.""" + respx.post(f"{base_url}/fhir/resolve/codeable-concept").mock( + return_value=Response(200, json=CODEABLE_CONCEPT_RESPONSE) + ) + + result = sync_client.fhir.resolve_codeable_concept( + coding=[ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + ], + resource_type="Condition", + ) + + assert result["best_match"] is not None + assert 
result["best_match"]["resolution"]["source_concept"]["vocabulary_id"] == "SNOMED" + assert len(result["alternatives"]) == 1 + + @respx.mock + def test_resolve_batch_with_all_options(self, sync_client: OMOPHub, base_url: str) -> None: + """Batch passes resource_type, include_recommendations, and include_quality.""" + route = respx.post(f"{base_url}/fhir/resolve/batch").mock( + return_value=Response(200, json=BATCH_RESPONSE) + ) + + sync_client.fhir.resolve_batch( + [{"system": "http://snomed.info/sct", "code": "44054006"}], + resource_type="Condition", + include_recommendations=True, + recommendations_limit=3, + include_quality=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["resource_type"] == "Condition" + assert body["include_recommendations"] is True + assert body["recommendations_limit"] == 3 + assert body["include_quality"] is True + + @respx.mock + def test_resolve_codeable_concept_with_all_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """CodeableConcept passes text, resource_type, and enrichment flags.""" + route = respx.post(f"{base_url}/fhir/resolve/codeable-concept").mock( + return_value=Response(200, json=CODEABLE_CONCEPT_RESPONSE) + ) + + sync_client.fhir.resolve_codeable_concept( + coding=[{"system": "http://snomed.info/sct", "code": "44054006"}], + text="Type 2 diabetes", + resource_type="Condition", + include_recommendations=True, + recommendations_limit=5, + include_quality=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["text"] == "Type 2 diabetes" + assert body["resource_type"] == "Condition" + assert body["include_recommendations"] is True + assert body["include_quality"] is True + + @respx.mock + def test_resolve_codeable_concept_minimal(self, sync_client: OMOPHub, base_url: str) -> None: + """CodeableConcept with no optional flags (covers False branches).""" + route = respx.post(f"{base_url}/fhir/resolve/codeable-concept").mock( + 
return_value=Response(200, json=CODEABLE_CONCEPT_RESPONSE) + ) + + sync_client.fhir.resolve_codeable_concept( + coding=[{"system": "http://snomed.info/sct", "code": "44054006"}], + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert "text" not in body + assert "resource_type" not in body + assert "include_recommendations" not in body + assert "include_quality" not in body + + @respx.mock + def test_resolve_sends_correct_body(self, sync_client: OMOPHub, base_url: str) -> None: + """Verify the POST body includes only non-None parameters.""" + route = respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=SNOMED_RESOLVE_RESPONSE) + ) + + sync_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + resource_type="Condition", + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body == { + "system": "http://snomed.info/sct", + "code": "44054006", + "resource_type": "Condition", + } + # Ensure optional flags are NOT sent when they're default False + assert "include_recommendations" not in body + assert "include_quality" not in body + + +# -- Async tests ------------------------------------------------------------- + + +class TestFhirAsync: + """Tests for the asynchronous AsyncFhir resource.""" + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve(self, async_client: AsyncOMOPHub, base_url: str) -> None: + """Async resolve returns the same shape as sync.""" + respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=SNOMED_RESOLVE_RESPONSE) + ) + + result = await async_client.fhir.resolve( + system="http://snomed.info/sct", + code="44054006", + ) + + assert result["resolution"]["mapping_type"] == "direct" + assert result["resolution"]["target_table"] == "condition_occurrence" + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_batch(self, async_client: AsyncOMOPHub, base_url: str) -> None: + """Async batch resolve 
returns results and summary.""" + respx.post(f"{base_url}/fhir/resolve/batch").mock( + return_value=Response(200, json=BATCH_RESPONSE) + ) + + result = await async_client.fhir.resolve_batch( + [{"system": "http://snomed.info/sct", "code": "44054006"}], + resource_type="Condition", + include_quality=True, + ) + + assert result["summary"]["total"] == 1 + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_codeable_concept( + self, async_client: AsyncOMOPHub, base_url: str + ) -> None: + """Async codeable concept resolve returns best_match.""" + respx.post(f"{base_url}/fhir/resolve/codeable-concept").mock( + return_value=Response(200, json=CODEABLE_CONCEPT_RESPONSE) + ) + + result = await async_client.fhir.resolve_codeable_concept( + coding=[ + {"system": "http://snomed.info/sct", "code": "44054006"}, + {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": "E11.9"}, + ], + text="Type 2 diabetes", + resource_type="Condition", + include_recommendations=True, + include_quality=True, + ) + + assert result["best_match"] is not None + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_vocabulary_id_bypass( + self, async_client: AsyncOMOPHub, base_url: str + ) -> None: + """Async resolve with vocabulary_id exercises the bypass branch.""" + route = respx.post(f"{base_url}/fhir/resolve").mock( + return_value=Response(200, json=ICD10_MAPPED_RESPONSE) + ) + + result = await async_client.fhir.resolve( + vocabulary_id="ICD10CM", + code="E11.9", + include_recommendations=True, + recommendations_limit=3, + include_quality=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["vocabulary_id"] == "ICD10CM" + assert body["include_recommendations"] is True + assert body["recommendations_limit"] == 3 + assert body["include_quality"] is True + assert "resolution" in result + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_batch_all_flags( + self, async_client: AsyncOMOPHub, base_url: str + ) -> None: 
+ """Async batch with include_recommendations exercises that branch.""" + route = respx.post(f"{base_url}/fhir/resolve/batch").mock( + return_value=Response(200, json=BATCH_RESPONSE) + ) + + await async_client.fhir.resolve_batch( + [{"system": "http://snomed.info/sct", "code": "44054006"}], + resource_type="Condition", + include_recommendations=True, + recommendations_limit=5, + include_quality=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["include_recommendations"] is True + assert body["recommendations_limit"] == 5 + assert body["include_quality"] is True + + @respx.mock + @pytest.mark.anyio + async def test_async_fhir_property_cached(self, async_client: AsyncOMOPHub, base_url: str) -> None: + """Accessing client.fhir twice returns the same cached instance.""" + fhir1 = async_client.fhir + fhir2 = async_client.fhir + assert fhir1 is fhir2 + + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_codeable_minimal( + self, async_client: AsyncOMOPHub, base_url: str + ) -> None: + """Async codeable concept with no optional flags (covers False branches).""" + route = respx.post(f"{base_url}/fhir/resolve/codeable-concept").mock( + return_value=Response(200, json=CODEABLE_CONCEPT_RESPONSE) + ) + + await async_client.fhir.resolve_codeable_concept( + coding=[{"system": "http://snomed.info/sct", "code": "44054006"}], + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert "text" not in body + assert "resource_type" not in body + assert "include_recommendations" not in body + assert "include_quality" not in body + + @respx.mock + @pytest.mark.anyio + async def test_async_resolve_batch_minimal( + self, async_client: AsyncOMOPHub, base_url: str + ) -> None: + """Async batch with no optional flags (covers False branches).""" + route = respx.post(f"{base_url}/fhir/resolve/batch").mock( + return_value=Response(200, json=BATCH_RESPONSE) + ) + + await async_client.fhir.resolve_batch( + 
[{"system": "http://snomed.info/sct", "code": "44054006"}], + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert "resource_type" not in body + assert "include_recommendations" not in body + assert "include_quality" not in body + + +class TestFhirPropertyCaching: + """Test lazy-property cache hit on both client types.""" + + @respx.mock + def test_sync_fhir_property_cached(self, sync_client: OMOPHub) -> None: + """Accessing client.fhir twice returns the same cached instance.""" + fhir1 = sync_client.fhir + fhir2 = sync_client.fhir + assert fhir1 is fhir2 diff --git a/tests/unit/resources/test_hierarchy.py b/tests/unit/resources/test_hierarchy.py index bd2db57..dde4cd0 100644 --- a/tests/unit/resources/test_hierarchy.py +++ b/tests/unit/resources/test_hierarchy.py @@ -16,6 +16,97 @@ class TestHierarchyResource: """Tests for the synchronous Hierarchy resource.""" + @respx.mock + def test_get_hierarchy(self, sync_client: OMOPHub, base_url: str) -> None: + """Test getting complete hierarchy for a concept.""" + hierarchy_response = { + "success": True, + "data": { + "ancestors": [{"concept_id": 201820, "level": 1}], + "descendants": [{"concept_id": 201830, "level": 1}], + "summary": {"total_ancestors": 1, "total_descendants": 1}, + }, + } + respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response(200, json=hierarchy_response) + ) + + result = sync_client.hierarchy.get(201826) + assert "ancestors" in result + assert "descendants" in result + + @respx.mock + def test_get_hierarchy_with_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test getting hierarchy with all options.""" + route = respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response( + 200, + json={"success": True, "data": {"ancestors": [], "descendants": []}}, + ) + ) + + sync_client.hierarchy.get( + 201826, + format="flat", + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition"], + max_levels=15, + 
max_results=100, + relationship_types=["Is a", "Subsumes"], + include_invalid=True, + ) + + url_str = str(route.calls[0].request.url) + assert "format=flat" in url_str + assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str + assert "domain_ids=Condition" in url_str + assert "max_levels=15" in url_str + assert "max_results=100" in url_str + assert "relationship_types=Is+a%2CSubsumes" in url_str + assert "include_invalid=true" in url_str + + @respx.mock + def test_get_hierarchy_max_levels_capped( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test that max_levels is capped at 20.""" + route = respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response( + 200, + json={"success": True, "data": {"ancestors": [], "descendants": []}}, + ) + ) + + # Request max_levels=50, should be capped to 20 + sync_client.hierarchy.get(201826, max_levels=50) + + url_str = str(route.calls[0].request.url) + assert "max_levels=20" in url_str + + @respx.mock + def test_get_hierarchy_graph_format( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test getting hierarchy in graph format.""" + graph_response = { + "success": True, + "data": { + "nodes": [{"id": 201826, "label": "Type 2 diabetes"}], + "edges": [{"source": 201826, "target": 201820}], + }, + } + route = respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response(200, json=graph_response) + ) + + result = sync_client.hierarchy.get(201826, format="graph") + + url_str = str(route.calls[0].request.url) + assert "format=graph" in url_str + assert "nodes" in result + @respx.mock def test_get_ancestors(self, sync_client: OMOPHub, base_url: str) -> None: """Test getting concept ancestors.""" @@ -23,8 +114,16 @@ def test_get_ancestors(self, sync_client: OMOPHub, base_url: str) -> None: "success": True, "data": { "ancestors": [ - {"concept_id": 201820, "concept_name": "Diabetes mellitus", "level": 1}, - {"concept_id": 4000, "concept_name": "Endocrine disorder", "level": 2}, 
+ { + "concept_id": 201820, + "concept_name": "Diabetes mellitus", + "level": 1, + }, + { + "concept_id": 4000, + "concept_name": "Endocrine disorder", + "level": 2, + }, ], "summary": {"total_ancestors": 2, "max_level": 2}, }, @@ -37,7 +136,9 @@ def test_get_ancestors(self, sync_client: OMOPHub, base_url: str) -> None: assert "ancestors" in result @respx.mock - def test_get_ancestors_with_options(self, sync_client: OMOPHub, base_url: str) -> None: + def test_get_ancestors_with_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: """Test getting ancestors with all options.""" route = respx.get(f"{base_url}/concepts/201826/ancestors").mock( return_value=Response( @@ -47,25 +148,23 @@ def test_get_ancestors_with_options(self, sync_client: OMOPHub, base_url: str) - sync_client.hierarchy.ancestors( 201826, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=5, relationship_types=["Is a", "Subsumes"], include_paths=True, include_distance=True, - standard_only=True, - include_deprecated=True, + include_invalid=True, page=2, page_size=50, ) url_str = str(route.calls[0].request.url) - assert "vocabulary_id=SNOMED" in url_str + assert "vocabulary_ids=SNOMED" in url_str assert "max_levels=5" in url_str assert "relationship_types=Is+a%2CSubsumes" in url_str assert "include_paths=true" in url_str assert "include_distance=true" in url_str - assert "standard_only=true" in url_str - assert "include_deprecated=true" in url_str + assert "include_invalid=true" in url_str assert "page=2" in url_str assert "page_size=50" in url_str @@ -101,28 +200,25 @@ def test_get_descendants_with_options( sync_client.hierarchy.descendants( 201820, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=3, relationship_types=["Is a"], include_distance=True, - standard_only=True, - include_deprecated=True, + include_paths=True, + include_invalid=True, domain_ids=["Condition"], - concept_class_ids=["Clinical Finding"], - include_synonyms=True, page=1, 
page_size=100, ) url_str = str(route.calls[0].request.url) - assert "vocabulary_id=SNOMED" in url_str + assert "vocabulary_ids=SNOMED" in url_str assert "max_levels=3" in url_str assert "relationship_types=Is+a" in url_str assert "include_distance=true" in url_str - assert "standard_only=true" in url_str + assert "include_paths=true" in url_str + assert "include_invalid=true" in url_str assert "domain_ids=Condition" in url_str - assert "concept_class_ids=Clinical+Finding" in url_str - assert "include_synonyms=true" in url_str assert "page=1" in url_str assert "page_size=100" in url_str @@ -130,24 +226,95 @@ def test_get_descendants_with_options( def test_get_descendants_max_levels_capped( self, sync_client: OMOPHub, base_url: str ) -> None: - """Test that max_levels is capped at 10.""" + """Test that max_levels is capped at 20.""" route = respx.get(f"{base_url}/concepts/201820/descendants").mock( return_value=Response( 200, json={"success": True, "data": {"descendants": []}} ) ) - # Request max_levels=50, should be capped to 10 + # Request max_levels=50, should be capped to 20 sync_client.hierarchy.descendants(201820, max_levels=50) url_str = str(route.calls[0].request.url) - assert "max_levels=10" in url_str - + assert "max_levels=20" in url_str class TestAsyncHierarchyResource: """Tests for the asynchronous AsyncHierarchy resource.""" + @pytest.mark.asyncio + @respx.mock + async def test_async_get_hierarchy( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async getting complete hierarchy.""" + hierarchy_response = { + "success": True, + "data": { + "ancestors": [{"concept_id": 201820, "level": 1}], + "descendants": [{"concept_id": 201830, "level": 1}], + }, + } + respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response(200, json=hierarchy_response) + ) + + result = await async_client.hierarchy.get(201826) + assert "ancestors" in result + assert "descendants" in result + + @pytest.mark.asyncio + @respx.mock 
+ async def test_async_get_hierarchy_with_options( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async hierarchy with all options.""" + route = respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response( + 200, + json={"success": True, "data": {"ancestors": [], "descendants": []}}, + ) + ) + + await async_client.hierarchy.get( + 201826, + format="graph", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition", "Drug"], + max_levels=10, + max_results=50, + relationship_types=["Is a"], + include_invalid=True, + ) + + url_str = str(route.calls[0].request.url) + assert "format=graph" in url_str + assert "vocabulary_ids=SNOMED" in url_str + assert "domain_ids=Condition%2CDrug" in url_str + assert "max_levels=10" in url_str + assert "max_results=50" in url_str + assert "relationship_types=Is+a" in url_str + assert "include_invalid=true" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_get_hierarchy_max_levels_capped( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async hierarchy max_levels capped at 20.""" + route = respx.get(f"{base_url}/concepts/201826/hierarchy").mock( + return_value=Response( + 200, + json={"success": True, "data": {"ancestors": [], "descendants": []}}, + ) + ) + + await async_client.hierarchy.get(201826, max_levels=100) + + url_str = str(route.calls[0].request.url) + assert "max_levels=20" in url_str + @pytest.mark.asyncio @respx.mock async def test_async_get_ancestors( @@ -177,19 +344,19 @@ async def test_async_get_ancestors_with_options( await async_client.hierarchy.ancestors( 201826, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=3, relationship_types=["Is a"], include_paths=True, - standard_only=True, + include_invalid=True, ) url_str = str(route.calls[0].request.url) - assert "vocabulary_id=SNOMED" in url_str + assert "vocabulary_ids=SNOMED" in url_str assert "max_levels=3" in url_str assert "include_paths=true" 
in url_str assert "relationship_types=Is+a" in url_str - assert "standard_only=true" in url_str + assert "include_invalid=true" in url_str @pytest.mark.asyncio @respx.mock @@ -220,19 +387,16 @@ async def test_async_get_descendants_with_filters( await async_client.hierarchy.descendants( 201820, - vocabulary_id="SNOMED", + vocabulary_ids=["SNOMED"], max_levels=5, domain_ids=["Condition"], - concept_class_ids=["Clinical Finding"], - standard_only=True, - include_synonyms=True, + include_invalid=True, + include_paths=True, ) url_str = str(route.calls[0].request.url) - assert "vocabulary_id=SNOMED" in url_str + assert "vocabulary_ids=SNOMED" in url_str assert "domain_ids=Condition" in url_str - assert "standard_only=true" in url_str + assert "include_invalid=true" in url_str assert "max_levels=5" in url_str - assert "concept_class_ids=Clinical+Finding" in url_str or "concept_class_ids=Clinical%20Finding" in url_str - assert "include_synonyms=true" in url_str - + assert "include_paths=true" in url_str diff --git a/tests/unit/resources/test_mappings.py b/tests/unit/resources/test_mappings.py index 65b175c..dfdd691 100644 --- a/tests/unit/resources/test_mappings.py +++ b/tests/unit/resources/test_mappings.py @@ -44,42 +44,20 @@ def test_get_mappings(self, sync_client: OMOPHub, base_url: str) -> None: def test_get_mappings_with_filters( self, sync_client: OMOPHub, base_url: str ) -> None: - """Test getting mappings with all filter options.""" + """Test getting mappings with filter options.""" route = respx.get(f"{base_url}/concepts/201826/mappings").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) ) sync_client.mappings.get( 201826, - target_vocabularies=["ICD10CM", "ICD9CM"], - mapping_types=["MAPS TO", "IS A"], - direction="outgoing", - include_indirect=True, - standard_only=True, - include_mapping_quality=True, - include_synonyms=True, - 
include_context=True, - active_only=False, - sort_by="mapping_type", - sort_order="asc", - page=1, - page_size=100, + target_vocabulary="ICD10CM", + include_invalid=True, ) url_str = str(route.calls[0].request.url) - assert "target_vocabularies=ICD10CM%2CICD9CM" in url_str - assert "mapping_types=MAPS+TO%2CIS+A" in url_str - assert "direction=outgoing" in url_str - assert "include_indirect=true" in url_str - assert "standard_only=true" in url_str - assert "include_mapping_quality=true" in url_str - assert "include_synonyms=true" in url_str - assert "include_context=true" in url_str - assert "active_only=false" in url_str - assert "sort_by=mapping_type" in url_str - assert "sort_order=asc" in url_str + assert "target_vocabulary=ICD10CM" in url_str + assert "include_invalid=true" in url_str @respx.mock def test_map_concepts(self, sync_client: OMOPHub, base_url: str) -> None: @@ -101,8 +79,8 @@ def test_map_concepts(self, sync_client: OMOPHub, base_url: str) -> None: ) result = sync_client.mappings.map( - source_concepts=[{"concept_id": 201826}], target_vocabulary="ICD10CM", + source_concepts=[201826], ) assert "mappings" in result @@ -115,17 +93,12 @@ def test_map_concepts_with_options( ) -> None: """Test mapping concepts with additional options.""" route = respx.post(f"{base_url}/concepts/map").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) ) sync_client.mappings.map( - source_concepts=[ - {"concept_id": 201826}, - {"vocabulary_id": "SNOMED", "concept_code": "44054006"}, - ], target_vocabulary="ICD10CM", + source_concepts=[201826, 4329847], mapping_type="equivalent", include_invalid=True, ) @@ -133,6 +106,62 @@ def test_map_concepts_with_options( # Verify POST body assert route.calls[0].request.content + @respx.mock + def test_map_concepts_with_source_codes( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test mapping concepts 
using source_codes parameter.""" + import json + + map_response = { + "success": True, + "data": { + "mappings": [ + { + "source_concept_id": 4306040, + "source_concept_name": "Acetaminophen", + "target_concept_id": 1125315, + "target_vocabulary_id": "RxNorm", + } + ], + }, + } + route = respx.post(f"{base_url}/concepts/map").mock( + return_value=Response(200, json=map_response) + ) + + result = sync_client.mappings.map( + target_vocabulary="RxNorm", + source_codes=[ + {"vocabulary_id": "SNOMED", "concept_code": "387517004"}, + {"vocabulary_id": "SNOMED", "concept_code": "108774000"}, + ], + ) + + assert "mappings" in result + # Verify request body contains source_codes + body = json.loads(route.calls[0].request.content) + assert "source_codes" in body + assert len(body["source_codes"]) == 2 + assert body["source_codes"][0]["vocabulary_id"] == "SNOMED" + + def test_map_concepts_requires_source(self, sync_client: OMOPHub) -> None: + """Test that map() raises error when neither source_concepts nor source_codes provided.""" + with pytest.raises( + ValueError, match="Either source_concepts or source_codes is required" + ): + sync_client.mappings.map(target_vocabulary="ICD10CM") + + def test_map_concepts_rejects_both_sources(self, sync_client: OMOPHub) -> None: + """Test that map() raises error when both source_concepts and source_codes provided.""" + with pytest.raises( + ValueError, match="Cannot use both source_concepts and source_codes" + ): + sync_client.mappings.map( + target_vocabulary="ICD10CM", + source_concepts=[201826], + source_codes=[{"vocabulary_id": "SNOMED", "concept_code": "44054006"}], + ) class TestAsyncMappingsResource: @@ -145,9 +174,7 @@ async def test_async_get_mappings( ) -> None: """Test async getting mappings.""" respx.get(f"{base_url}/concepts/201826/mappings").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) ) result = 
await async_client.mappings.get(201826) @@ -158,32 +185,20 @@ async def test_async_get_mappings( async def test_async_get_mappings_with_filters( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: - """Test async mappings with all filters.""" + """Test async mappings with filters.""" route = respx.get(f"{base_url}/concepts/201826/mappings").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) ) await async_client.mappings.get( 201826, - target_vocabularies=["ICD10CM"], - mapping_types=["MAPS TO"], - direction="both", - include_indirect=True, - standard_only=True, - include_mapping_quality=True, - include_synonyms=True, - include_context=True, - active_only=False, - sort_by="target_vocabulary", - sort_order="desc", + target_vocabulary="ICD10CM", + include_invalid=True, ) url_str = str(route.calls[0].request.url) - assert "target_vocabularies=ICD10CM" in url_str - assert "include_indirect=true" in url_str - assert "active_only=false" in url_str + assert "target_vocabulary=ICD10CM" in url_str + assert "include_invalid=true" in url_str @pytest.mark.asyncio @respx.mock @@ -192,14 +207,12 @@ async def test_async_map_concepts( ) -> None: """Test async mapping concepts.""" respx.post(f"{base_url}/concepts/map").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) ) result = await async_client.mappings.map( - source_concepts=[{"concept_id": 201826}], target_vocabulary="ICD10CM", + source_concepts=[201826], ) assert "mappings" in result @@ -211,17 +224,61 @@ async def test_async_map_concepts_with_options( ) -> None: """Test async mapping with options.""" route = respx.post(f"{base_url}/concepts/map").mock( - return_value=Response( - 200, json={"success": True, "data": {"mappings": []}} - ) + return_value=Response(200, 
json={"success": True, "data": {"mappings": []}}) ) await async_client.mappings.map( - source_concepts=[{"concept_id": 201826}], target_vocabulary="ICD10CM", + source_concepts=[201826], mapping_type="direct", include_invalid=True, ) assert route.calls[0].request.content + @pytest.mark.asyncio + @respx.mock + async def test_async_map_concepts_with_source_codes( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async mapping concepts using source_codes.""" + import json + + route = respx.post(f"{base_url}/concepts/map").mock( + return_value=Response(200, json={"success": True, "data": {"mappings": []}}) + ) + + result = await async_client.mappings.map( + target_vocabulary="RxNorm", + source_codes=[ + {"vocabulary_id": "SNOMED", "concept_code": "387517004"}, + ], + ) + + assert "mappings" in result + body = json.loads(route.calls[0].request.content) + assert "source_codes" in body + + @pytest.mark.asyncio + async def test_async_map_requires_source( + self, async_client: omophub.AsyncOMOPHub + ) -> None: + """Test async map() raises error without sources.""" + with pytest.raises( + ValueError, match="Either source_concepts or source_codes is required" + ): + await async_client.mappings.map(target_vocabulary="ICD10CM") + + @pytest.mark.asyncio + async def test_async_map_rejects_both_sources( + self, async_client: omophub.AsyncOMOPHub + ) -> None: + """Test async map() raises error with both sources.""" + with pytest.raises( + ValueError, match="Cannot use both source_concepts and source_codes" + ): + await async_client.mappings.map( + target_vocabulary="ICD10CM", + source_concepts=[201826], + source_codes=[{"vocabulary_id": "SNOMED", "concept_code": "44054006"}], + ) diff --git a/tests/unit/resources/test_relationships.py b/tests/unit/resources/test_relationships.py index 10bceb5..e44677b 100644 --- a/tests/unit/resources/test_relationships.py +++ b/tests/unit/resources/test_relationships.py @@ -53,16 +53,16 @@ def 
test_get_relationships_with_filters( sync_client.relationships.get( 201826, - relationship_type="Is a", - target_vocabulary="SNOMED", + relationship_ids=["Is a"], + vocabulary_ids=["SNOMED"], include_invalid=True, page=2, page_size=100, ) url_str = str(route.calls[0].request.url) - assert "relationship_type=Is+a" in url_str - assert "target_vocabulary=SNOMED" in url_str + assert "relationship_ids=Is+a" in url_str + assert "vocabulary_ids=SNOMED" in url_str assert "include_invalid=true" in url_str assert "page=2" in url_str assert "page_size=100" in url_str @@ -88,55 +88,22 @@ def test_get_relationship_types(self, sync_client: OMOPHub, base_url: str) -> No assert "relationship_types" in result @respx.mock - def test_get_relationship_types_with_filters( + def test_get_relationship_types_with_pagination( self, sync_client: OMOPHub, base_url: str ) -> None: - """Test getting relationship types with all filter options.""" + """Test getting relationship types with pagination options.""" route = respx.get(f"{base_url}/relationships/types").mock( return_value=Response( 200, json={"success": True, "data": {"relationship_types": []}} ) ) - sync_client.relationships.types( - vocabulary_ids=["SNOMED", "ICD10CM"], - include_reverse=True, - include_usage_stats=True, - include_examples=True, - category="hierarchy", - is_defining=True, - standard_only=True, - page=1, - page_size=50, - ) + sync_client.relationships.types(page=2, page_size=50) url_str = str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str - assert "include_reverse=true" in url_str - assert "include_usage_stats=true" in url_str - assert "include_examples=true" in url_str - assert "category=hierarchy" in url_str - assert "is_defining=true" in url_str - assert "standard_only=true" in url_str - assert "page=1" in url_str + assert "page=2" in url_str assert "page_size=50" in url_str - @respx.mock - def test_get_relationship_types_is_defining_false( - self, sync_client: OMOPHub, base_url: 
str - ) -> None: - """Test is_defining=False is properly encoded.""" - route = respx.get(f"{base_url}/relationships/types").mock( - return_value=Response( - 200, json={"success": True, "data": {"relationship_types": []}} - ) - ) - - sync_client.relationships.types(is_defining=False) - - url_str = str(route.calls[0].request.url) - assert "is_defining=false" in url_str - class TestAsyncRelationshipsResource: """Tests for the asynchronous AsyncRelationships resource.""" @@ -170,16 +137,16 @@ async def test_async_get_relationships_with_filters( await async_client.relationships.get( 201826, - relationship_type="Maps to", - target_vocabulary="ICD10CM", + relationship_ids=["Maps to"], + vocabulary_ids=["ICD10CM"], include_invalid=True, page=1, page_size=200, ) url_str = str(route.calls[0].request.url) - assert "relationship_type=Maps+to" in url_str - assert "target_vocabulary=ICD10CM" in url_str + assert "relationship_ids=Maps+to" in url_str + assert "vocabulary_ids=ICD10CM" in url_str assert "include_invalid=true" in url_str assert "page=1" in url_str assert "page_size=200" in url_str @@ -201,30 +168,18 @@ async def test_async_get_relationship_types( @pytest.mark.asyncio @respx.mock - async def test_async_get_relationship_types_with_filters( + async def test_async_get_relationship_types_with_pagination( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: - """Test async relationship types with all filters.""" + """Test async relationship types with pagination options.""" route = respx.get(f"{base_url}/relationships/types").mock( return_value=Response( 200, json={"success": True, "data": {"relationship_types": []}} ) ) - await async_client.relationships.types( - vocabulary_ids=["SNOMED"], - include_reverse=True, - include_usage_stats=True, - include_examples=True, - category="mapping", - is_defining=False, - standard_only=True, - page=1, - page_size=100, - ) + await async_client.relationships.types(page=3, page_size=25) url_str = 
str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED" in url_str - assert "include_reverse=true" in url_str - assert "category=mapping" in url_str - assert "is_defining=false" in url_str + assert "page=3" in url_str + assert "page_size=25" in url_str diff --git a/tests/unit/resources/test_search.py b/tests/unit/resources/test_search.py index bad0d91..2b8348f 100644 --- a/tests/unit/resources/test_search.py +++ b/tests/unit/resources/test_search.py @@ -36,12 +36,12 @@ def test_basic_search(self, sync_client: OMOPHub, base_url: str) -> None: assert "concepts" in result @respx.mock - def test_basic_search_with_filters(self, sync_client: OMOPHub, base_url: str) -> None: + def test_basic_search_with_filters( + self, sync_client: OMOPHub, base_url: str + ) -> None: """Test basic search with vocabulary and domain filters.""" route = respx.get(f"{base_url}/search/concepts").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) sync_client.search.basic( @@ -77,15 +77,14 @@ def test_basic_search_with_filters(self, sync_client: OMOPHub, base_url: str) -> @respx.mock def test_basic_iter_single_page(self, sync_client: OMOPHub, base_url: str) -> None: """Test basic_iter with single page of results.""" + # Note: meta is at top level, not nested inside data search_response = { "success": True, - "data": { - "concepts": [ - {"concept_id": 201826, "concept_name": "Type 2 diabetes mellitus"}, - {"concept_id": 201820, "concept_name": "Diabetes mellitus"}, - ], - "meta": {"pagination": {"page": 1, "has_next": False}}, - }, + "data": [ + {"concept_id": 201826, "concept_name": "Type 2 diabetes mellitus"}, + {"concept_id": 201820, "concept_name": "Diabetes mellitus"}, + ], + "meta": {"pagination": {"page": 1, "has_next": False}}, } respx.get(f"{base_url}/search/concepts").mock( return_value=Response(200, json=search_response) @@ -95,21 +94,20 @@ def 
test_basic_iter_single_page(self, sync_client: OMOPHub, base_url: str) -> No assert len(concepts) == 2 @respx.mock - def test_basic_iter_multiple_pages(self, sync_client: OMOPHub, base_url: str) -> None: + def test_basic_iter_multiple_pages( + self, sync_client: OMOPHub, base_url: str + ) -> None: """Test basic_iter auto-pagination across multiple pages.""" + # Note: meta is at top level, not nested inside data page1_response = { "success": True, - "data": { - "concepts": [{"concept_id": 1}], - "meta": {"pagination": {"page": 1, "has_next": True}}, - }, + "data": [{"concept_id": 1}], + "meta": {"pagination": {"page": 1, "has_next": True}}, } page2_response = { "success": True, - "data": { - "concepts": [{"concept_id": 2}], - "meta": {"pagination": {"page": 2, "has_next": False}}, - }, + "data": [{"concept_id": 2}], + "meta": {"pagination": {"page": 2, "has_next": False}}, } call_count = 0 @@ -131,7 +129,7 @@ def mock_response(request): @respx.mock def test_advanced_search(self, sync_client: OMOPHub, base_url: str) -> None: """Test advanced search with POST body.""" - route = respx.post(f"{base_url}/concepts/search/advanced").mock( + route = respx.post(f"{base_url}/search/advanced").mock( return_value=Response( 200, json={ @@ -143,14 +141,14 @@ def test_advanced_search(self, sync_client: OMOPHub, base_url: str) -> None: sync_client.search.advanced( "myocardial infarction", - vocabularies=["SNOMED"], - domains=["Condition"], - concept_classes=["Clinical Finding"], + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + concept_class_ids=["Clinical Finding"], standard_concepts_only=True, include_invalid=True, relationship_filters=[{"type": "Is a", "concept_id": 123}], - limit=50, - offset=10, + page=2, + page_size=50, ) # Verify POST body was sent @@ -174,7 +172,7 @@ def test_autocomplete(self, sync_client: OMOPHub, base_url: str) -> None: "diab", vocabulary_ids=["SNOMED"], domains=["Condition"], - max_suggestions=5, + page_size=5, ) assert len(result) == 2 @@ 
-182,7 +180,7 @@ def test_autocomplete(self, sync_client: OMOPHub, base_url: str) -> None: assert "query=diab" in url_str assert "vocabulary_ids=SNOMED" in url_str assert "domains=Condition" in url_str - assert "max_suggestions=5" in url_str + assert "page_size=5" in url_str class TestAsyncSearchResource: @@ -214,9 +212,7 @@ async def test_async_basic_search_with_filters( ) -> None: """Test async basic search with all filters.""" route = respx.get(f"{base_url}/search/concepts").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) await async_client.search.basic( @@ -247,16 +243,14 @@ async def test_async_advanced_search( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: """Test async advanced search.""" - respx.post(f"{base_url}/concepts/search/advanced").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + respx.post(f"{base_url}/search/advanced").mock( + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) result = await async_client.search.advanced( "heart attack", - vocabularies=["SNOMED", "ICD10CM"], - domains=["Condition"], + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition"], standard_concepts_only=True, ) @@ -278,3 +272,717 @@ async def test_async_autocomplete( result = await async_client.search.autocomplete("asp") assert len(result) == 1 + + +class TestSemanticSearch: + """Tests for semantic search functionality.""" + + @respx.mock + def test_semantic_search(self, sync_client: OMOPHub, base_url: str) -> None: + """Test semantic concept search.""" + semantic_response = { + "success": True, + "data": { + "results": [ + { + "concept_id": 4329847, + "concept_name": "Myocardial infarction", + "domain_id": "Condition", + "vocabulary_id": "SNOMED", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + "concept_code": "22298006", + 
"similarity_score": 0.95, + "matched_text": "heart attack", + } + ], + }, + "meta": {"pagination": {"page": 1, "has_next": False, "total_items": 1}}, + } + route = respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + result = sync_client.search.semantic("heart attack") + assert "results" in result + assert len(result["results"]) == 1 + assert result["results"][0]["similarity_score"] == 0.95 + + url_str = str(route.calls[0].request.url) + assert "query=heart+attack" in url_str + + @respx.mock + def test_semantic_search_with_filters( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test semantic search with all filters.""" + route = respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json={"success": True, "data": {"results": []}}) + ) + + sync_client.search.semantic( + "heart attack", + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition"], + standard_concept="S", + concept_class_id="Clinical Finding", + threshold=0.5, + page=2, + page_size=50, + ) + + url_str = str(route.calls[0].request.url) + assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str + assert "domain_ids=Condition" in url_str + assert "standard_concept=S" in url_str + assert "concept_class_id=Clinical+Finding" in url_str + assert "threshold=0.5" in url_str + assert "page=2" in url_str + assert "page_size=50" in url_str + + @respx.mock + def test_semantic_iter_single_page( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test semantic_iter with single page.""" + semantic_response = { + "success": True, + "data": [ + {"concept_id": 1, "similarity_score": 0.9}, + {"concept_id": 2, "similarity_score": 0.8}, + ], + "meta": {"pagination": {"page": 1, "has_next": False}}, + } + respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + results = list(sync_client.search.semantic_iter("diabetes")) + assert len(results) == 2 
+ + @respx.mock + def test_semantic_iter_multiple_pages( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test semantic_iter auto-pagination.""" + page1_response = { + "success": True, + "data": [{"concept_id": 1, "similarity_score": 0.9}], + "meta": {"pagination": {"page": 1, "has_next": True}}, + } + page2_response = { + "success": True, + "data": [{"concept_id": 2, "similarity_score": 0.8}], + "meta": {"pagination": {"page": 2, "has_next": False}}, + } + + call_count = 0 + + def mock_response(request): + nonlocal call_count + call_count += 1 + if call_count == 1: + return Response(200, json=page1_response) + return Response(200, json=page2_response) + + respx.get(f"{base_url}/concepts/semantic-search").mock(side_effect=mock_response) + + results = list(sync_client.search.semantic_iter("diabetes", page_size=1)) + assert len(results) == 2 + assert results[0]["concept_id"] == 1 + assert results[1]["concept_id"] == 2 + + @respx.mock + def test_semantic_iter_empty_response( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test semantic_iter with empty response yields no items.""" + semantic_response = { + "success": True, + "data": [], + "meta": {"pagination": {"page": 1, "has_next": False}}, + } + respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + results = list(sync_client.search.semantic_iter("nonexistent query")) + assert len(results) == 0 + + +class TestSimilarSearch: + """Tests for similar concept search functionality.""" + + @respx.mock + def test_similar_by_concept_id(self, sync_client: OMOPHub, base_url: str) -> None: + """Test finding similar concepts by concept_id.""" + similar_response = { + "success": True, + "data": { + "similar_concepts": [ + { + "concept_id": 1234, + "concept_name": "Similar condition", + "domain_id": "Condition", + "vocabulary_id": "SNOMED", + "concept_class_id": "Clinical Finding", + "standard_concept": "S", + "concept_code": "12345", + 
"similarity_score": 0.85, + } + ], + "search_metadata": { + "original_query": "4329847", + "algorithm_used": "hybrid", + "similarity_threshold": 0.7, + "total_candidates": 100, + "results_returned": 1, + }, + }, + } + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response(200, json=similar_response) + ) + + result = sync_client.search.similar(concept_id=4329847) + assert "similar_concepts" in result + assert len(result["similar_concepts"]) == 1 + + # Verify POST body + import json + + body = json.loads(route.calls[0].request.content) + assert body["concept_id"] == 4329847 + assert body["algorithm"] == "hybrid" + assert body["similarity_threshold"] == 0.7 + + @respx.mock + def test_similar_by_concept_name( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test finding similar concepts by concept_name.""" + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": {}}, + }, + ) + ) + + sync_client.search.similar(concept_name="Type 2 diabetes mellitus") + + import json + + body = json.loads(route.calls[0].request.content) + assert body["concept_name"] == "Type 2 diabetes mellitus" + + @respx.mock + def test_similar_by_query(self, sync_client: OMOPHub, base_url: str) -> None: + """Test finding similar concepts by natural language query.""" + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": {}}, + }, + ) + ) + + sync_client.search.similar(query="high blood sugar condition") + + import json + + body = json.loads(route.calls[0].request.content) + assert body["query"] == "high blood sugar condition" + + @respx.mock + def test_similar_with_all_options( + self, sync_client: OMOPHub, base_url: str + ) -> None: + """Test similar search with all options.""" + route = respx.post(f"{base_url}/search/similar").mock( + 
return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": {}}, + }, + ) + ) + + sync_client.search.similar( + concept_id=4329847, + algorithm="semantic", + similarity_threshold=0.8, + page_size=50, + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition"], + standard_concept="S", + include_invalid=True, + include_scores=True, + include_explanations=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["algorithm"] == "semantic" + assert body["similarity_threshold"] == 0.8 + assert body["page_size"] == 50 + assert body["vocabulary_ids"] == ["SNOMED", "ICD10CM"] + assert body["domain_ids"] == ["Condition"] + assert body["standard_concept"] == "S" + assert body["include_invalid"] is True + assert body["include_scores"] is True + assert body["include_explanations"] is True + + +class TestAsyncSemanticSearch: + """Tests for async semantic search functionality.""" + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_search( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic search.""" + semantic_response = { + "success": True, + "data": { + "results": [{"concept_id": 4329847, "similarity_score": 0.95}], + }, + } + respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + result = await async_client.search.semantic("heart attack") + assert "results" in result + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_with_filters( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic search with filters.""" + route = respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json={"success": True, "data": {"results": []}}) + ) + + await async_client.search.semantic( + "diabetes", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + standard_concept="S", + 
threshold=0.6, + ) + + url_str = str(route.calls[0].request.url) + assert "vocabulary_ids=SNOMED" in url_str + assert "standard_concept=S" in url_str + assert "threshold=0.6" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_with_all_filters( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic search with all available filters.""" + route = respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json={"success": True, "data": {"results": []}}) + ) + + await async_client.search.semantic( + "heart attack", + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition", "Observation"], + standard_concept="C", + concept_class_id="Clinical Finding", + threshold=0.7, + page=3, + page_size=50, + ) + + url_str = str(route.calls[0].request.url) + assert "vocabulary_ids=SNOMED%2CICD10CM" in url_str + assert "domain_ids=Condition%2CObservation" in url_str + assert "standard_concept=C" in url_str + assert "concept_class_id=Clinical+Finding" in url_str + assert "threshold=0.7" in url_str + assert "page=3" in url_str + assert "page_size=50" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_iter_single_page( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic_iter with single page.""" + semantic_response = { + "success": True, + "data": [ + {"concept_id": 1, "similarity_score": 0.9}, + {"concept_id": 2, "similarity_score": 0.8}, + ], + "meta": {"pagination": {"page": 1, "has_next": False}}, + } + respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + results = [] + async for item in async_client.search.semantic_iter("diabetes"): + results.append(item) + + assert len(results) == 2 + assert results[0]["concept_id"] == 1 + assert results[1]["concept_id"] == 2 + + @pytest.mark.asyncio + @respx.mock + async def 
test_async_semantic_iter_multiple_pages( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic_iter auto-pagination across multiple pages.""" + page1_response = { + "success": True, + "data": [{"concept_id": 1, "similarity_score": 0.9}], + "meta": {"pagination": {"page": 1, "has_next": True}}, + } + page2_response = { + "success": True, + "data": [{"concept_id": 2, "similarity_score": 0.8}], + "meta": {"pagination": {"page": 2, "has_next": True}}, + } + page3_response = { + "success": True, + "data": [{"concept_id": 3, "similarity_score": 0.7}], + "meta": {"pagination": {"page": 3, "has_next": False}}, + } + + call_count = 0 + + def mock_response(request): + nonlocal call_count + call_count += 1 + if call_count == 1: + return Response(200, json=page1_response) + elif call_count == 2: + return Response(200, json=page2_response) + return Response(200, json=page3_response) + + respx.get(f"{base_url}/concepts/semantic-search").mock(side_effect=mock_response) + + results = [] + async for item in async_client.search.semantic_iter("diabetes", page_size=1): + results.append(item) + + assert len(results) == 3 + assert results[0]["concept_id"] == 1 + assert results[1]["concept_id"] == 2 + assert results[2]["concept_id"] == 3 + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_iter_with_filters( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic_iter with filters are forwarded correctly.""" + semantic_response = { + "success": True, + "data": [{"concept_id": 1, "similarity_score": 0.9}], + "meta": {"pagination": {"page": 1, "has_next": False}}, + } + route = respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + results = [] + async for item in async_client.search.semantic_iter( + "diabetes", + vocabulary_ids=["SNOMED"], + domain_ids=["Condition"], + standard_concept="S", + concept_class_id="Clinical Finding", 
+ threshold=0.5, + page_size=10, + ): + results.append(item) + + assert len(results) == 1 + url_str = str(route.calls[0].request.url) + assert "vocabulary_ids=SNOMED" in url_str + assert "domain_ids=Condition" in url_str + assert "standard_concept=S" in url_str + assert "concept_class_id=Clinical+Finding" in url_str + assert "threshold=0.5" in url_str + assert "page_size=10" in url_str + + @pytest.mark.asyncio + @respx.mock + async def test_async_semantic_iter_empty_response( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async semantic_iter with empty response yields no items.""" + semantic_response = { + "success": True, + "data": [], + "meta": {"pagination": {"page": 1, "has_next": False}}, + } + respx.get(f"{base_url}/concepts/semantic-search").mock( + return_value=Response(200, json=semantic_response) + ) + + results = [] + async for item in async_client.search.semantic_iter("nonexistent query"): + results.append(item) + + assert len(results) == 0 + + @pytest.mark.asyncio + @respx.mock + async def test_async_similar( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async similar search.""" + similar_response = { + "success": True, + "data": { + "similar_concepts": [{"concept_id": 1234, "similarity_score": 0.85}], + "search_metadata": {"algorithm_used": "hybrid"}, + }, + } + respx.post(f"{base_url}/search/similar").mock( + return_value=Response(200, json=similar_response) + ) + + result = await async_client.search.similar(concept_id=4329847) + assert "similar_concepts" in result + assert len(result["similar_concepts"]) == 1 + + @pytest.mark.asyncio + @respx.mock + async def test_async_similar_by_concept_name( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async similar search by concept_name.""" + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": 
{}}, + }, + ) + ) + + await async_client.search.similar(concept_name="Type 2 diabetes mellitus") + + import json + + body = json.loads(route.calls[0].request.content) + assert body["concept_name"] == "Type 2 diabetes mellitus" + + @pytest.mark.asyncio + @respx.mock + async def test_async_similar_by_query( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async similar search by natural language query.""" + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": {}}, + }, + ) + ) + + await async_client.search.similar(query="high blood sugar condition") + + import json + + body = json.loads(route.calls[0].request.content) + assert body["query"] == "high blood sugar condition" + + @pytest.mark.asyncio + @respx.mock + async def test_async_similar_with_all_options( + self, async_client: omophub.AsyncOMOPHub, base_url: str + ) -> None: + """Test async similar search with all options.""" + route = respx.post(f"{base_url}/search/similar").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"similar_concepts": [], "search_metadata": {}}, + }, + ) + ) + + await async_client.search.similar( + concept_id=4329847, + algorithm="semantic", + similarity_threshold=0.8, + page_size=50, + vocabulary_ids=["SNOMED", "ICD10CM"], + domain_ids=["Condition"], + standard_concept="S", + include_invalid=True, + include_scores=True, + include_explanations=True, + ) + + import json + + body = json.loads(route.calls[0].request.content) + assert body["algorithm"] == "semantic" + assert body["similarity_threshold"] == 0.8 + assert body["page_size"] == 50 + assert body["vocabulary_ids"] == ["SNOMED", "ICD10CM"] + assert body["domain_ids"] == ["Condition"] + assert body["standard_concept"] == "S" + assert body["include_invalid"] is True + assert body["include_scores"] is True + assert body["include_explanations"] is True + + +class 
TestBulkBasicSearch: + """Tests for bulk lexical search.""" + + @respx.mock + def test_bulk_basic(self, sync_client: OMOPHub, base_url: str) -> None: + """Test bulk basic search with multiple queries.""" + mock_response = { + "success": True, + "data": { + "results": [ + { + "search_id": "q1", + "query": "diabetes", + "results": [{"concept_id": 201826, "concept_name": "Type 2 diabetes"}], + "status": "completed", + "duration": 15, + }, + { + "search_id": "q2", + "query": "hypertension", + "results": [{"concept_id": 316866, "concept_name": "Hypertensive disorder"}], + "status": "completed", + "duration": 12, + }, + ], + "total_searches": 2, + "completed_searches": 2, + "failed_searches": 0, + }, + } + respx.post(f"{base_url}/search/bulk").mock( + return_value=Response(200, json=mock_response) + ) + + result = sync_client.search.bulk_basic([ + {"search_id": "q1", "query": "diabetes"}, + {"search_id": "q2", "query": "hypertension"}, + ]) + + assert len(result["results"]) == 2 + assert result["total_searches"] == 2 + assert result["completed_searches"] == 2 + assert result["results"][0]["search_id"] == "q1" + + @respx.mock + def test_bulk_basic_with_defaults(self, sync_client: OMOPHub, base_url: str) -> None: + """Test bulk basic search with shared defaults.""" + import json + + respx.post(f"{base_url}/search/bulk").mock( + return_value=Response(200, json={ + "success": True, + "data": { + "results": [], + "total_searches": 1, + "completed_searches": 1, + "failed_searches": 0, + }, + }) + ) + + sync_client.search.bulk_basic( + [{"search_id": "q1", "query": "diabetes"}], + defaults={"vocabulary_ids": ["SNOMED"], "page_size": 5}, + ) + + request_body = json.loads(respx.calls[0].request.content) + assert request_body["defaults"]["vocabulary_ids"] == ["SNOMED"] + assert request_body["defaults"]["page_size"] == 5 + + +class TestBulkSemanticSearch: + """Tests for bulk semantic search.""" + + @respx.mock + def test_bulk_semantic(self, sync_client: OMOPHub, base_url: str) -> 
None: + """Test bulk semantic search with multiple queries.""" + mock_response = { + "success": True, + "data": { + "results": [ + { + "search_id": "s1", + "query": "heart failure treatment", + "results": [{"concept_id": 316139, "similarity_score": 0.92}], + "status": "completed", + "result_count": 1, + "duration": 45, + }, + ], + "total_searches": 1, + "completed_count": 1, + "failed_count": 0, + "total_duration": 45, + }, + } + respx.post(f"{base_url}/search/semantic-bulk").mock( + return_value=Response(200, json=mock_response) + ) + + result = sync_client.search.bulk_semantic([ + {"search_id": "s1", "query": "heart failure treatment"}, + ]) + + assert len(result["results"]) == 1 + assert result["completed_count"] == 1 + assert result["results"][0]["status"] == "completed" + + @respx.mock + def test_bulk_semantic_with_defaults(self, sync_client: OMOPHub, base_url: str) -> None: + """Test bulk semantic search with shared defaults.""" + import json + + respx.post(f"{base_url}/search/semantic-bulk").mock( + return_value=Response(200, json={ + "success": True, + "data": { + "results": [], + "total_searches": 1, + "completed_count": 1, + "failed_count": 0, + }, + }) + ) + + sync_client.search.bulk_semantic( + [{"search_id": "s1", "query": "diabetes medications"}], + defaults={"threshold": 0.8, "page_size": 10, "vocabulary_ids": ["SNOMED"]}, + ) + + request_body = json.loads(respx.calls[0].request.content) + assert request_body["defaults"]["threshold"] == 0.8 + assert request_body["defaults"]["page_size"] == 10 + assert request_body["defaults"]["vocabulary_ids"] == ["SNOMED"] diff --git a/tests/unit/resources/test_vocabularies.py b/tests/unit/resources/test_vocabularies.py index 7e3fd35..6016651 100644 --- a/tests/unit/resources/test_vocabularies.py +++ b/tests/unit/resources/test_vocabularies.py @@ -91,25 +91,16 @@ def test_get_vocabulary(self, sync_client: OMOPHub, base_url: str) -> None: assert result["vocabulary_id"] == "SNOMED" @respx.mock - def 
test_get_vocabulary_with_options( - self, sync_client: OMOPHub, base_url: str - ) -> None: - """Test getting vocabulary with include options.""" - route = respx.get(f"{base_url}/vocabularies/SNOMED").mock( + def test_get_vocabulary_basic(self, sync_client: OMOPHub, base_url: str) -> None: + """Test getting vocabulary (no additional options - use stats() for statistics).""" + respx.get(f"{base_url}/vocabularies/SNOMED").mock( return_value=Response( 200, json={"success": True, "data": {"vocabulary_id": "SNOMED"}} ) ) - sync_client.vocabularies.get( - "SNOMED", - include_stats=True, - include_domains=True, - ) - - url_str = str(route.calls[0].request.url) - assert "include_stats=true" in url_str - assert "include_domains=true" in url_str + result = sync_client.vocabularies.get("SNOMED") + assert result["vocabulary_id"] == "SNOMED" @respx.mock def test_get_vocabulary_stats(self, sync_client: OMOPHub, base_url: str) -> None: @@ -132,29 +123,22 @@ def test_get_vocabulary_stats(self, sync_client: OMOPHub, base_url: str) -> None @respx.mock def test_get_vocabulary_domains(self, sync_client: OMOPHub, base_url: str) -> None: - """Test getting vocabulary domains.""" + """Test getting all standard OHDSI domains.""" domains_response = { "success": True, "data": { "domains": [ - {"domain_id": "Condition", "concept_count": 150000}, - {"domain_id": "Drug", "concept_count": 100000}, + {"domain_id": "Condition", "domain_name": "Condition"}, + {"domain_id": "Drug", "domain_name": "Drug"}, ], }, } - route = respx.get(f"{base_url}/vocabularies/domains").mock( + respx.get(f"{base_url}/vocabularies/domains").mock( return_value=Response(200, json=domains_response) ) - result = sync_client.vocabularies.domains( - vocabulary_ids=["SNOMED"], page=1, page_size=25 - ) - + result = sync_client.vocabularies.domains() assert "domains" in result - url_str = str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED" in url_str - assert "page=1" in url_str - assert "page_size=25" in url_str 
@respx.mock def test_get_vocabulary_concepts(self, sync_client: OMOPHub, base_url: str) -> None: @@ -174,17 +158,25 @@ def test_get_vocabulary_concepts(self, sync_client: OMOPHub, base_url: str) -> N sync_client.vocabularies.concepts( "SNOMED", - domain_id="Condition", - concept_class_id="Clinical Finding", - standard_only=True, + search="diabetes", + standard_concept="S", + include_invalid=True, + include_relationships=True, + include_synonyms=True, + sort_by="concept_id", + sort_order="desc", page=1, page_size=100, ) url_str = str(route.calls[0].request.url) - assert "domain_id=Condition" in url_str - assert "concept_class_id=Clinical+Finding" in url_str - assert "standard_only=true" in url_str + assert "search=diabetes" in url_str + assert "standard_concept=S" in url_str + assert "include_invalid=true" in url_str + assert "include_relationships=true" in url_str + assert "include_synonyms=true" in url_str + assert "sort_by=concept_id" in url_str + assert "sort_order=desc" in url_str class TestAsyncVocabulariesResource: @@ -247,25 +239,18 @@ async def test_async_get_vocabulary( @pytest.mark.asyncio @respx.mock - async def test_async_get_vocabulary_with_options( + async def test_async_get_vocabulary_basic( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: - """Test async get vocabulary with options.""" - route = respx.get(f"{base_url}/vocabularies/SNOMED").mock( + """Test async get vocabulary (no additional options - use stats() for statistics).""" + respx.get(f"{base_url}/vocabularies/SNOMED").mock( return_value=Response( 200, json={"success": True, "data": {"vocabulary_id": "SNOMED"}} ) ) - await async_client.vocabularies.get( - "SNOMED", - include_stats=True, - include_domains=True, - ) - - url_str = str(route.calls[0].request.url) - assert "include_stats=true" in url_str - assert "include_domains=true" in url_str + result = await async_client.vocabularies.get("SNOMED") + assert result["vocabulary_id"] == "SNOMED" @pytest.mark.asyncio 
@respx.mock @@ -291,21 +276,13 @@ async def test_async_get_vocabulary_stats( async def test_async_get_vocabulary_domains( self, async_client: omophub.AsyncOMOPHub, base_url: str ) -> None: - """Test async getting vocabulary domains.""" - route = respx.get(f"{base_url}/vocabularies/domains").mock( - return_value=Response( - 200, json={"success": True, "data": {"domains": []}} - ) - ) - - await async_client.vocabularies.domains( - vocabulary_ids=["SNOMED"], page=2, page_size=30 + """Test async getting all standard OHDSI domains.""" + respx.get(f"{base_url}/vocabularies/domains").mock( + return_value=Response(200, json={"success": True, "data": {"domains": []}}) ) - url_str = str(route.calls[0].request.url) - assert "vocabulary_ids=SNOMED" in url_str - assert "page=2" in url_str - assert "page_size=30" in url_str + result = await async_client.vocabularies.domains() + assert "domains" in result @pytest.mark.asyncio @respx.mock @@ -314,21 +291,25 @@ async def test_async_get_vocabulary_concepts( ) -> None: """Test async getting vocabulary concepts.""" route = respx.get(f"{base_url}/vocabularies/SNOMED/concepts").mock( - return_value=Response( - 200, json={"success": True, "data": {"concepts": []}} - ) + return_value=Response(200, json={"success": True, "data": {"concepts": []}}) ) await async_client.vocabularies.concepts( "SNOMED", - domain_id="Drug", - concept_class_id="Ingredient", - standard_only=True, + search="aspirin", + standard_concept="S", + include_invalid=True, + include_relationships=True, + include_synonyms=True, + sort_by="name", + sort_order="asc", page=1, page_size=50, ) url_str = str(route.calls[0].request.url) - assert "domain_id=Drug" in url_str - assert "concept_class_id=Ingredient" in url_str - assert "standard_only=true" in url_str + assert "search=aspirin" in url_str + assert "standard_concept=S" in url_str + assert "include_invalid=true" in url_str + assert "include_relationships=true" in url_str + assert "include_synonyms=true" in url_str diff 
--git a/tests/unit/test_client.py b/tests/unit/test_client.py index c378206..793c512 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -13,17 +13,12 @@ class TestOMOPHubClient: """Tests for the synchronous OMOPHub client.""" - def test_client_requires_api_key(self) -> None: + def test_client_requires_api_key(self, monkeypatch: pytest.MonkeyPatch) -> None: """Test that client raises error without API key.""" - # Clear any module-level API key - original_key = omophub.api_key - omophub.api_key = None + monkeypatch.setattr("omophub._client.default_api_key", None) - try: - with pytest.raises(AuthenticationError): - OMOPHub() - finally: - omophub.api_key = original_key + with pytest.raises(AuthenticationError): + OMOPHub() def test_client_accepts_api_key(self, api_key: str) -> None: """Test that client accepts API key parameter.""" @@ -81,7 +76,9 @@ async def test_async_get_concept( concept = await async_client.concepts.get(201826) assert concept["concept_id"] == 201826 - def test_async_client_has_resources(self, async_client: omophub.AsyncOMOPHub) -> None: + def test_async_client_has_resources( + self, async_client: omophub.AsyncOMOPHub + ) -> None: """Test that async client has all expected resources.""" assert hasattr(async_client, "concepts") assert hasattr(async_client, "search") @@ -91,16 +88,12 @@ def test_async_client_has_resources(self, async_client: omophub.AsyncOMOPHub) -> assert hasattr(async_client, "vocabularies") assert hasattr(async_client, "domains") - def test_async_client_requires_api_key(self) -> None: + def test_async_client_requires_api_key(self, monkeypatch: pytest.MonkeyPatch) -> None: """Test that async client raises error without API key.""" - original_key = omophub.api_key - omophub.api_key = None + monkeypatch.setattr("omophub._client.default_api_key", None) - try: - with pytest.raises(AuthenticationError): - omophub.AsyncOMOPHub() - finally: - omophub.api_key = original_key + with pytest.raises(AuthenticationError): + 
omophub.AsyncOMOPHub() class TestClientLazyPropertyCaching: @@ -204,11 +197,11 @@ def test_client_custom_max_retries(self, api_key: str) -> None: def test_client_vocab_version(self, api_key: str) -> None: """Test client accepts vocab_version parameter.""" - client = OMOPHub(api_key=api_key, vocab_version="2024.4") + client = OMOPHub(api_key=api_key, vocab_version="2024.2") - assert client._vocab_version == "2024.4" + assert client._vocab_version == "2024.2" # Verify it's passed to request handler - assert client._request._vocab_version == "2024.4" + assert client._request._vocab_version == "2024.2" client.close() diff --git a/tests/unit/test_http.py b/tests/unit/test_http.py index 694fefc..23ef7f4 100644 --- a/tests/unit/test_http.py +++ b/tests/unit/test_http.py @@ -29,7 +29,7 @@ def test_request_success(self) -> None: return_value=Response(200, json={"success": True}) ) - content, status_code, headers = client.request( + content, status_code, _ = client.request( "GET", "https://api.example.com/test" ) @@ -167,7 +167,7 @@ def side_effect(request: httpx.Request) -> Response: respx.get("https://api.example.com/test").mock(side_effect=side_effect) with patch("time.sleep"): # Skip actual sleep - content, status_code, _ = client.request( + _, status_code, _ = client.request( "GET", "https://api.example.com/test" ) @@ -185,9 +185,8 @@ def test_max_retries_exceeded(self) -> None: side_effect=httpx.ConnectError("Connection refused") ) - with patch("time.sleep"): # Skip actual sleep - with pytest.raises(ConnectionError): - client.request("GET", "https://api.example.com/test") + with patch("time.sleep"), pytest.raises(ConnectionError): + client.request("GET", "https://api.example.com/test") client.close() @@ -201,7 +200,7 @@ def test_default_headers(self) -> None: assert "Content-Type" in headers assert headers["Content-Type"] == "application/json" assert "User-Agent" in headers - assert "omophub-python" in headers["User-Agent"] + assert "OMOPHub-SDK-Python" in 
headers["User-Agent"] client.close() @@ -244,7 +243,7 @@ async def test_request_success(self) -> None: return_value=Response(200, json={"success": True}) ) - content, status_code, headers = await client.request( + content, status_code, _ = await client.request( "GET", "https://api.example.com/test" ) @@ -325,7 +324,7 @@ def side_effect(request: httpx.Request) -> Response: respx.get("https://api.example.com/test").mock(side_effect=side_effect) with patch("asyncio.sleep"): # Skip actual sleep - content, status_code, _ = await client.request( + _, status_code, _ = await client.request( "GET", "https://api.example.com/test" ) @@ -350,7 +349,7 @@ async def test_default_headers(self) -> None: assert "Accept" in headers assert headers["Accept"] == "application/json" assert "User-Agent" in headers - assert "omophub-python" in headers["User-Agent"] + assert "OMOPHub-SDK-Python" in headers["User-Agent"] await client.close() diff --git a/tests/unit/test_pagination.py b/tests/unit/test_pagination.py index 99b9306..97413f6 100644 --- a/tests/unit/test_pagination.py +++ b/tests/unit/test_pagination.py @@ -56,7 +56,9 @@ def test_build_query_string_caps_page_size(self) -> None: def test_build_paginated_path_without_existing_query(self) -> None: """Test building path without existing query string.""" - result = PaginationHelper.build_paginated_path("/concepts", page=1, page_size=20) + result = PaginationHelper.build_paginated_path( + "/concepts", page=1, page_size=20 + ) assert result.startswith("/concepts?") assert "page=1" in result @@ -197,12 +199,12 @@ async def fetch_page(page: int, page_size: int) -> tuple: assert result == items @pytest.mark.asyncio - async def test_sync_callable_fallback(self) -> None: - """Test paginate_async works with sync callables too.""" + async def test_async_callable_required(self) -> None: + """Test paginate_async requires an async callable (not sync).""" items = [{"id": 1}, {"id": 2}] - meta = {"page": 1, "has_next": False} + meta: dict[str, 
object] = {"page": 1, "has_next": False} - def fetch_page(page: int, page_size: int) -> tuple: + async def fetch_page(page: int, page_size: int) -> tuple: return items, meta result = [item async for item in paginate_async(fetch_page)] diff --git a/tests/unit/test_request.py b/tests/unit/test_request.py index e0fbbdb..5a652f9 100644 --- a/tests/unit/test_request.py +++ b/tests/unit/test_request.py @@ -103,7 +103,7 @@ def test_vocab_version_header(self) -> None: http_client=http_client, base_url="https://api.example.com/v1", api_key="test_api_key", - vocab_version="2024.4", + vocab_version="2024.2", ) with respx.mock: @@ -114,7 +114,7 @@ def test_vocab_version_header(self) -> None: request_handler.get("/test") assert "X-Vocab-Version" in route.calls[0].request.headers - assert route.calls[0].request.headers["X-Vocab-Version"] == "2024.4" + assert route.calls[0].request.headers["X-Vocab-Version"] == "2024.2" def test_request_id_extraction(self, request_handler: Request) -> None: """Test request ID is extracted from headers on error.""" @@ -176,9 +176,7 @@ def test_error_parsing_401(self, request_handler: Request) -> None: assert exc_info.value.status_code == 401 - def test_error_parsing_429_with_retry_after( - self, request_handler: Request - ) -> None: + def test_error_parsing_429_with_retry_after(self, request_handler: Request) -> None: """Test 429 error parsing with Retry-After header.""" with respx.mock: respx.get("https://api.example.com/v1/test").mock( @@ -228,9 +226,7 @@ def test_json_decode_error(self, request_handler: Request) -> None: assert "Invalid JSON" in str(exc_info.value) - def test_json_decode_error_on_error_status( - self, request_handler: Request - ) -> None: + def test_json_decode_error_on_error_status(self, request_handler: Request) -> None: """Test handling of invalid JSON on error status code.""" with respx.mock: respx.get("https://api.example.com/v1/test").mock( @@ -264,6 +260,83 @@ def test_url_building(self) -> None: # Test without leading 
slash assert request._build_url("concepts") == "https://api.example.com/v1/concepts" + def test_get_raw_request(self, request_handler: Request) -> None: + """Test get_raw returns full response with data and meta.""" + with respx.mock: + respx.get("https://api.example.com/v1/search").mock( + return_value=Response( + 200, + json={ + "success": True, + "data": {"concepts": [{"concept_id": 1}]}, + "meta": {"pagination": {"page": 1, "total_pages": 5}}, + }, + ) + ) + result = request_handler.get_raw("/search") + assert "data" in result + assert "meta" in result + assert result["meta"]["pagination"]["page"] == 1 + assert result["meta"]["pagination"]["total_pages"] == 5 + + def test_get_raw_with_params(self, request_handler: Request) -> None: + """Test get_raw passes query parameters correctly.""" + with respx.mock: + route = respx.get("https://api.example.com/v1/search").mock( + return_value=Response( + 200, + json={ + "data": {"concepts": []}, + "meta": {"pagination": {"page": 2, "has_next": True}}, + }, + ) + ) + + result = request_handler.get_raw("/search", params={"query": "test", "page": 2}) + + url_str = str(route.calls[0].request.url) + assert "query=test" in url_str + assert "page=2" in url_str + assert result["meta"]["pagination"]["page"] == 2 + + def test_get_raw_error_parsing(self, request_handler: Request) -> None: + """Test get_raw raises errors correctly.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response( + 404, + json={"error": {"message": "Not found"}}, + headers={"X-Request-Id": "req_123"}, + ) + ) + with pytest.raises(NotFoundError) as exc_info: + request_handler.get_raw("/test") + assert exc_info.value.request_id == "req_123" + + def test_get_raw_rate_limit(self, request_handler: Request) -> None: + """Test get_raw handles rate limit with retry-after.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response( + 429, + json={"error": {"message": "Rate limited"}}, + 
headers={"Retry-After": "45"}, + ) + ) + with pytest.raises(RateLimitError) as exc_info: + request_handler.get_raw("/test") + assert exc_info.value.retry_after == 45 + + def test_get_raw_json_decode_error(self, request_handler: Request) -> None: + """Test get_raw handles invalid JSON.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response(200, content=b"not json") + ) + with pytest.raises(OMOPHubError) as exc_info: + request_handler.get_raw("/test") + assert "Invalid JSON" in str(exc_info.value) + class TestAsyncRequest: """Tests for asynchronous AsyncRequest class.""" @@ -383,9 +456,7 @@ async def test_async_error_parsing(self, request_handler: AsyncRequest) -> None: assert exc_info.value.request_id == "req_xyz789" @pytest.mark.asyncio - async def test_async_json_decode_error( - self, request_handler: AsyncRequest - ) -> None: + async def test_async_json_decode_error(self, request_handler: AsyncRequest) -> None: """Test async handling of invalid JSON.""" with respx.mock: respx.get("https://api.example.com/v1/test").mock( @@ -398,9 +469,7 @@ async def test_async_json_decode_error( assert "Invalid JSON" in str(exc_info.value) @pytest.mark.asyncio - async def test_async_rate_limit_error( - self, request_handler: AsyncRequest - ) -> None: + async def test_async_rate_limit_error(self, request_handler: AsyncRequest) -> None: """Test async 429 rate limit error with retry-after.""" with respx.mock: respx.get("https://api.example.com/v1/test").mock( @@ -415,3 +484,85 @@ async def test_async_rate_limit_error( await request_handler.get("/test") assert exc_info.value.retry_after == 30 + + @pytest.mark.asyncio + async def test_async_get_raw_request(self, request_handler: AsyncRequest) -> None: + """Test async get_raw returns full response with data and meta.""" + with respx.mock: + respx.get("https://api.example.com/v1/search").mock( + return_value=Response( + 200, + json={ + "data": {"concepts": [{"concept_id": 42}]}, + "meta": 
{"pagination": {"page": 1, "has_next": True, "total_pages": 3}}, + }, + ) + ) + result = await request_handler.get_raw("/search") + assert "data" in result + assert "meta" in result + assert result["meta"]["pagination"]["page"] == 1 + assert result["meta"]["pagination"]["has_next"] is True + + @pytest.mark.asyncio + async def test_async_get_raw_with_params(self, request_handler: AsyncRequest) -> None: + """Test async get_raw passes query parameters correctly.""" + with respx.mock: + route = respx.get("https://api.example.com/v1/search").mock( + return_value=Response( + 200, + json={ + "data": {"concepts": []}, + "meta": {"pagination": {"page": 3}}, + }, + ) + ) + + result = await request_handler.get_raw("/search", params={"page": 3}) + + url_str = str(route.calls[0].request.url) + assert "page=3" in url_str + assert result["meta"]["pagination"]["page"] == 3 + + @pytest.mark.asyncio + async def test_async_get_raw_error(self, request_handler: AsyncRequest) -> None: + """Test async get_raw raises errors correctly.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response( + 404, + json={"error": {"message": "Not found"}}, + headers={"X-Request-Id": "req_async_456"}, + ) + ) + with pytest.raises(NotFoundError) as exc_info: + await request_handler.get_raw("/test") + assert exc_info.value.request_id == "req_async_456" + + @pytest.mark.asyncio + async def test_async_get_raw_rate_limit(self, request_handler: AsyncRequest) -> None: + """Test async get_raw handles rate limit with retry-after.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response( + 429, + json={"error": {"message": "Rate limited"}}, + headers={"Retry-After": "60"}, + ) + ) + with pytest.raises(RateLimitError) as exc_info: + await request_handler.get_raw("/test") + assert exc_info.value.retry_after == 60 + + @pytest.mark.asyncio + async def test_async_get_raw_json_decode_error( + self, request_handler: AsyncRequest + ) -> 
None: + """Test async get_raw handles invalid JSON.""" + with respx.mock: + respx.get("https://api.example.com/v1/test").mock( + return_value=Response(200, content=b"invalid json response") + ) + with pytest.raises(OMOPHubError) as exc_info: + await request_handler.get_raw("/test") + assert "Invalid JSON" in str(exc_info.value) diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000..48e0f61 --- /dev/null +++ b/uv.lock @@ -0,0 +1,563 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash 
= "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/23/f9/e92df5e07f3fc8d4c7f9a0f146ef75446bf870351cd37b788cf5897f8079/coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd", size = 825862, upload-time = "2025-12-28T15:42:56.969Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2d/9a/3742e58fd04b233df95c012ee9f3dfe04708a5e1d32613bd2d47d4e1be0d/coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147", size = 218633, upload-time = "2025-12-28T15:40:10.165Z" }, + { url = "https://files.pythonhosted.org/packages/7e/45/7e6bdc94d89cd7c8017ce735cf50478ddfe765d4fbf0c24d71d30ea33d7a/coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d", size = 219147, upload-time = "2025-12-28T15:40:12.069Z" }, + { url = "https://files.pythonhosted.org/packages/f7/38/0d6a258625fd7f10773fe94097dc16937a5f0e3e0cdf3adef67d3ac6baef/coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0", size = 245894, upload-time = "2025-12-28T15:40:13.556Z" }, + { url = "https://files.pythonhosted.org/packages/27/58/409d15ea487986994cbd4d06376e9860e9b157cfbfd402b1236770ab8dd2/coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90", size = 247721, upload-time = "2025-12-28T15:40:15.37Z" }, + { url = "https://files.pythonhosted.org/packages/da/bf/6e8056a83fd7a96c93341f1ffe10df636dd89f26d5e7b9ca511ce3bcf0df/coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d", size = 249585, upload-time = "2025-12-28T15:40:17.226Z" }, + { url = "https://files.pythonhosted.org/packages/f4/15/e1daff723f9f5959acb63cbe35b11203a9df77ee4b95b45fffd38b318390/coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b", size = 246597, upload-time = 
"2025-12-28T15:40:19.028Z" }, + { url = "https://files.pythonhosted.org/packages/74/a6/1efd31c5433743a6ddbc9d37ac30c196bb07c7eab3d74fbb99b924c93174/coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6", size = 247626, upload-time = "2025-12-28T15:40:20.846Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9f/1609267dd3e749f57fdd66ca6752567d1c13b58a20a809dc409b263d0b5f/coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e", size = 245629, upload-time = "2025-12-28T15:40:22.397Z" }, + { url = "https://files.pythonhosted.org/packages/e2/f6/6815a220d5ec2466383d7cc36131b9fa6ecbe95c50ec52a631ba733f306a/coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae", size = 245901, upload-time = "2025-12-28T15:40:23.836Z" }, + { url = "https://files.pythonhosted.org/packages/ac/58/40576554cd12e0872faf6d2c0eb3bc85f71d78427946ddd19ad65201e2c0/coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29", size = 246505, upload-time = "2025-12-28T15:40:25.421Z" }, + { url = "https://files.pythonhosted.org/packages/3b/77/9233a90253fba576b0eee81707b5781d0e21d97478e5377b226c5b096c0f/coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f", size = 221257, upload-time = "2025-12-28T15:40:27.217Z" }, + { url = "https://files.pythonhosted.org/packages/e0/43/e842ff30c1a0a623ec80db89befb84a3a7aad7bfe44a6ea77d5a3e61fedd/coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1", size = 222191, upload-time = "2025-12-28T15:40:28.916Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/9b/77baf488516e9ced25fc215a6f75d803493fc3f6a1a1227ac35697910c2a/coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88", size = 218755, upload-time = "2025-12-28T15:40:30.812Z" }, + { url = "https://files.pythonhosted.org/packages/d7/cd/7ab01154e6eb79ee2fab76bf4d89e94c6648116557307ee4ebbb85e5c1bf/coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3", size = 219257, upload-time = "2025-12-28T15:40:32.333Z" }, + { url = "https://files.pythonhosted.org/packages/01/d5/b11ef7863ffbbdb509da0023fad1e9eda1c0eaea61a6d2ea5b17d4ac706e/coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9", size = 249657, upload-time = "2025-12-28T15:40:34.1Z" }, + { url = "https://files.pythonhosted.org/packages/f7/7c/347280982982383621d29b8c544cf497ae07ac41e44b1ca4903024131f55/coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee", size = 251581, upload-time = "2025-12-28T15:40:36.131Z" }, + { url = "https://files.pythonhosted.org/packages/82/f6/ebcfed11036ade4c0d75fa4453a6282bdd225bc073862766eec184a4c643/coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf", size = 253691, upload-time = "2025-12-28T15:40:37.626Z" }, + { url = "https://files.pythonhosted.org/packages/02/92/af8f5582787f5d1a8b130b2dcba785fa5e9a7a8e121a0bb2220a6fdbdb8a/coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3", size = 249799, upload-time = 
"2025-12-28T15:40:39.47Z" }, + { url = "https://files.pythonhosted.org/packages/24/aa/0e39a2a3b16eebf7f193863323edbff38b6daba711abaaf807d4290cf61a/coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef", size = 251389, upload-time = "2025-12-28T15:40:40.954Z" }, + { url = "https://files.pythonhosted.org/packages/73/46/7f0c13111154dc5b978900c0ccee2e2ca239b910890e674a77f1363d483e/coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851", size = 249450, upload-time = "2025-12-28T15:40:42.489Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ca/e80da6769e8b669ec3695598c58eef7ad98b0e26e66333996aee6316db23/coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb", size = 249170, upload-time = "2025-12-28T15:40:44.279Z" }, + { url = "https://files.pythonhosted.org/packages/af/18/9e29baabdec1a8644157f572541079b4658199cfd372a578f84228e860de/coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba", size = 250081, upload-time = "2025-12-28T15:40:45.748Z" }, + { url = "https://files.pythonhosted.org/packages/00/f8/c3021625a71c3b2f516464d322e41636aea381018319050a8114105872ee/coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19", size = 221281, upload-time = "2025-12-28T15:40:47.232Z" }, + { url = "https://files.pythonhosted.org/packages/27/56/c216625f453df6e0559ed666d246fcbaaa93f3aa99eaa5080cea1229aa3d/coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a", size = 222215, upload-time = "2025-12-28T15:40:49.19Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/9a/be342e76f6e531cae6406dc46af0d350586f24d9b67fdfa6daee02df71af/coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c", size = 220886, upload-time = "2025-12-28T15:40:51.067Z" }, + { url = "https://files.pythonhosted.org/packages/ce/8a/87af46cccdfa78f53db747b09f5f9a21d5fc38d796834adac09b30a8ce74/coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3", size = 218927, upload-time = "2025-12-28T15:40:52.814Z" }, + { url = "https://files.pythonhosted.org/packages/82/a8/6e22fdc67242a4a5a153f9438d05944553121c8f4ba70cb072af4c41362e/coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e", size = 219288, upload-time = "2025-12-28T15:40:54.262Z" }, + { url = "https://files.pythonhosted.org/packages/d0/0a/853a76e03b0f7c4375e2ca025df45c918beb367f3e20a0a8e91967f6e96c/coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c", size = 250786, upload-time = "2025-12-28T15:40:56.059Z" }, + { url = "https://files.pythonhosted.org/packages/ea/b4/694159c15c52b9f7ec7adf49d50e5f8ee71d3e9ef38adb4445d13dd56c20/coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62", size = 253543, upload-time = "2025-12-28T15:40:57.585Z" }, + { url = "https://files.pythonhosted.org/packages/96/b2/7f1f0437a5c855f87e17cf5d0dc35920b6440ff2b58b1ba9788c059c26c8/coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968", size = 254635, upload-time = "2025-12-28T15:40:59.443Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/d1/73c3fdb8d7d3bddd9473c9c6a2e0682f09fc3dfbcb9c3f36412a7368bcab/coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e", size = 251202, upload-time = "2025-12-28T15:41:01.328Z" }, + { url = "https://files.pythonhosted.org/packages/66/3c/f0edf75dcc152f145d5598329e864bbbe04ab78660fe3e8e395f9fff010f/coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f", size = 252566, upload-time = "2025-12-28T15:41:03.319Z" }, + { url = "https://files.pythonhosted.org/packages/17/b3/e64206d3c5f7dcbceafd14941345a754d3dbc78a823a6ed526e23b9cdaab/coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee", size = 250711, upload-time = "2025-12-28T15:41:06.411Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ad/28a3eb970a8ef5b479ee7f0c484a19c34e277479a5b70269dc652b730733/coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf", size = 250278, upload-time = "2025-12-28T15:41:08.285Z" }, + { url = "https://files.pythonhosted.org/packages/54/e3/c8f0f1a93133e3e1291ca76cbb63565bd4b5c5df63b141f539d747fff348/coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c", size = 252154, upload-time = "2025-12-28T15:41:09.969Z" }, + { url = "https://files.pythonhosted.org/packages/d0/bf/9939c5d6859c380e405b19e736321f1c7d402728792f4c752ad1adcce005/coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7", size = 221487, upload-time = "2025-12-28T15:41:11.468Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/dc/7282856a407c621c2aad74021680a01b23010bb8ebf427cf5eacda2e876f/coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6", size = 222299, upload-time = "2025-12-28T15:41:13.386Z" }, + { url = "https://files.pythonhosted.org/packages/10/79/176a11203412c350b3e9578620013af35bcdb79b651eb976f4a4b32044fa/coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c", size = 220941, upload-time = "2025-12-28T15:41:14.975Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a4/e98e689347a1ff1a7f67932ab535cef82eb5e78f32a9e4132e114bbb3a0a/coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78", size = 218951, upload-time = "2025-12-28T15:41:16.653Z" }, + { url = "https://files.pythonhosted.org/packages/32/33/7cbfe2bdc6e2f03d6b240d23dc45fdaf3fd270aaf2d640be77b7f16989ab/coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b", size = 219325, upload-time = "2025-12-28T15:41:18.609Z" }, + { url = "https://files.pythonhosted.org/packages/59/f6/efdabdb4929487baeb7cb2a9f7dac457d9356f6ad1b255be283d58b16316/coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd", size = 250309, upload-time = "2025-12-28T15:41:20.629Z" }, + { url = "https://files.pythonhosted.org/packages/12/da/91a52516e9d5aea87d32d1523f9cdcf7a35a3b298e6be05d6509ba3cfab2/coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992", size = 252907, upload-time = "2025-12-28T15:41:22.257Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/38/f1ea837e3dc1231e086db1638947e00d264e7e8c41aa8ecacf6e1e0c05f4/coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4", size = 254148, upload-time = "2025-12-28T15:41:23.87Z" }, + { url = "https://files.pythonhosted.org/packages/7f/43/f4f16b881aaa34954ba446318dea6b9ed5405dd725dd8daac2358eda869a/coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a", size = 250515, upload-time = "2025-12-28T15:41:25.437Z" }, + { url = "https://files.pythonhosted.org/packages/84/34/8cba7f00078bd468ea914134e0144263194ce849ec3baad187ffb6203d1c/coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766", size = 252292, upload-time = "2025-12-28T15:41:28.459Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/cffac66c7652d84ee4ac52d3ccb94c015687d3b513f9db04bfcac2ac800d/coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4", size = 250242, upload-time = "2025-12-28T15:41:30.02Z" }, + { url = "https://files.pythonhosted.org/packages/f4/78/9a64d462263dde416f3c0067efade7b52b52796f489b1037a95b0dc389c9/coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398", size = 250068, upload-time = "2025-12-28T15:41:32.007Z" }, + { url = "https://files.pythonhosted.org/packages/69/c8/a8994f5fece06db7c4a97c8fc1973684e178599b42e66280dded0524ef00/coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784", size = 251846, upload-time = "2025-12-28T15:41:33.946Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/f7/91fa73c4b80305c86598a2d4e54ba22df6bf7d0d97500944af7ef155d9f7/coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461", size = 221512, upload-time = "2025-12-28T15:41:35.519Z" }, + { url = "https://files.pythonhosted.org/packages/45/0b/0768b4231d5a044da8f75e097a8714ae1041246bb765d6b5563bab456735/coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500", size = 222321, upload-time = "2025-12-28T15:41:37.371Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b8/bdcb7253b7e85157282450262008f1366aa04663f3e3e4c30436f596c3e2/coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9", size = 220949, upload-time = "2025-12-28T15:41:39.553Z" }, + { url = "https://files.pythonhosted.org/packages/70/52/f2be52cc445ff75ea8397948c96c1b4ee14f7f9086ea62fc929c5ae7b717/coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc", size = 219643, upload-time = "2025-12-28T15:41:41.567Z" }, + { url = "https://files.pythonhosted.org/packages/47/79/c85e378eaa239e2edec0c5523f71542c7793fe3340954eafb0bc3904d32d/coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a", size = 219997, upload-time = "2025-12-28T15:41:43.418Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9b/b1ade8bfb653c0bbce2d6d6e90cc6c254cbb99b7248531cc76253cb4da6d/coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4", size = 261296, upload-time = "2025-12-28T15:41:45.207Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/af/ebf91e3e1a2473d523e87e87fd8581e0aa08741b96265730e2d79ce78d8d/coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6", size = 263363, upload-time = "2025-12-28T15:41:47.163Z" }, + { url = "https://files.pythonhosted.org/packages/c4/8b/fb2423526d446596624ac7fde12ea4262e66f86f5120114c3cfd0bb2befa/coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1", size = 265783, upload-time = "2025-12-28T15:41:49.03Z" }, + { url = "https://files.pythonhosted.org/packages/9b/26/ef2adb1e22674913b89f0fe7490ecadcef4a71fa96f5ced90c60ec358789/coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd", size = 260508, upload-time = "2025-12-28T15:41:51.035Z" }, + { url = "https://files.pythonhosted.org/packages/ce/7d/f0f59b3404caf662e7b5346247883887687c074ce67ba453ea08c612b1d5/coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c", size = 263357, upload-time = "2025-12-28T15:41:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/1a/b1/29896492b0b1a047604d35d6fa804f12818fa30cdad660763a5f3159e158/coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0", size = 260978, upload-time = "2025-12-28T15:41:54.589Z" }, + { url = "https://files.pythonhosted.org/packages/48/f2/971de1238a62e6f0a4128d37adadc8bb882ee96afbe03ff1570291754629/coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e", size = 259877, upload-time = "2025-12-28T15:41:56.263Z" }, + { url 
= "https://files.pythonhosted.org/packages/6a/fc/0474efcbb590ff8628830e9aaec5f1831594874360e3251f1fdec31d07a3/coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53", size = 262069, upload-time = "2025-12-28T15:41:58.093Z" }, + { url = "https://files.pythonhosted.org/packages/88/4f/3c159b7953db37a7b44c0eab8a95c37d1aa4257c47b4602c04022d5cb975/coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842", size = 222184, upload-time = "2025-12-28T15:41:59.763Z" }, + { url = "https://files.pythonhosted.org/packages/58/a5/6b57d28f81417f9335774f20679d9d13b9a8fb90cd6160957aa3b54a2379/coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2", size = 223250, upload-time = "2025-12-28T15:42:01.52Z" }, + { url = "https://files.pythonhosted.org/packages/81/7c/160796f3b035acfbb58be80e02e484548595aa67e16a6345e7910ace0a38/coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09", size = 221521, upload-time = "2025-12-28T15:42:03.275Z" }, + { url = "https://files.pythonhosted.org/packages/aa/8e/ba0e597560c6563fc0adb902fda6526df5d4aa73bb10adf0574d03bd2206/coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894", size = 218996, upload-time = "2025-12-28T15:42:04.978Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8e/764c6e116f4221dc7aa26c4061181ff92edb9c799adae6433d18eeba7a14/coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a", size = 219326, upload-time = "2025-12-28T15:42:06.691Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/a6/6130dc6d8da28cdcbb0f2bf8865aeca9b157622f7c0031e48c6cf9a0e591/coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f", size = 250374, upload-time = "2025-12-28T15:42:08.786Z" }, + { url = "https://files.pythonhosted.org/packages/82/2b/783ded568f7cd6b677762f780ad338bf4b4750205860c17c25f7c708995e/coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909", size = 252882, upload-time = "2025-12-28T15:42:10.515Z" }, + { url = "https://files.pythonhosted.org/packages/cd/b2/9808766d082e6a4d59eb0cc881a57fc1600eb2c5882813eefff8254f71b5/coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4", size = 254218, upload-time = "2025-12-28T15:42:12.208Z" }, + { url = "https://files.pythonhosted.org/packages/44/ea/52a985bb447c871cb4d2e376e401116520991b597c85afdde1ea9ef54f2c/coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75", size = 250391, upload-time = "2025-12-28T15:42:14.21Z" }, + { url = "https://files.pythonhosted.org/packages/7f/1d/125b36cc12310718873cfc8209ecfbc1008f14f4f5fa0662aa608e579353/coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9", size = 252239, upload-time = "2025-12-28T15:42:16.292Z" }, + { url = "https://files.pythonhosted.org/packages/6a/16/10c1c164950cade470107f9f14bbac8485f8fb8515f515fca53d337e4a7f/coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465", size = 250196, upload-time = 
"2025-12-28T15:42:18.54Z" }, + { url = "https://files.pythonhosted.org/packages/2a/c6/cd860fac08780c6fd659732f6ced1b40b79c35977c1356344e44d72ba6c4/coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864", size = 250008, upload-time = "2025-12-28T15:42:20.365Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/a8c58d3d38f82a5711e1e0a67268362af48e1a03df27c03072ac30feefcf/coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9", size = 251671, upload-time = "2025-12-28T15:42:22.114Z" }, + { url = "https://files.pythonhosted.org/packages/f0/bc/fd4c1da651d037a1e3d53e8cb3f8182f4b53271ffa9a95a2e211bacc0349/coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5", size = 221777, upload-time = "2025-12-28T15:42:23.919Z" }, + { url = "https://files.pythonhosted.org/packages/4b/50/71acabdc8948464c17e90b5ffd92358579bd0910732c2a1c9537d7536aa6/coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a", size = 222592, upload-time = "2025-12-28T15:42:25.619Z" }, + { url = "https://files.pythonhosted.org/packages/f7/c8/a6fb943081bb0cc926499c7907731a6dc9efc2cbdc76d738c0ab752f1a32/coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0", size = 221169, upload-time = "2025-12-28T15:42:27.629Z" }, + { url = "https://files.pythonhosted.org/packages/16/61/d5b7a0a0e0e40d62e59bc8c7aa1afbd86280d82728ba97f0673b746b78e2/coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a", size = 219730, upload-time = "2025-12-28T15:42:29.306Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/2c/8881326445fd071bb49514d1ce97d18a46a980712b51fee84f9ab42845b4/coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6", size = 220001, upload-time = "2025-12-28T15:42:31.319Z" }, + { url = "https://files.pythonhosted.org/packages/b5/d7/50de63af51dfa3a7f91cc37ad8fcc1e244b734232fbc8b9ab0f3c834a5cd/coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673", size = 261370, upload-time = "2025-12-28T15:42:32.992Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2c/d31722f0ec918fd7453b2758312729f645978d212b410cd0f7c2aed88a94/coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5", size = 263485, upload-time = "2025-12-28T15:42:34.759Z" }, + { url = "https://files.pythonhosted.org/packages/fa/7a/2c114fa5c5fc08ba0777e4aec4c97e0b4a1afcb69c75f1f54cff78b073ab/coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d", size = 265890, upload-time = "2025-12-28T15:42:36.517Z" }, + { url = "https://files.pythonhosted.org/packages/65/d9/f0794aa1c74ceabc780fe17f6c338456bbc4e96bd950f2e969f48ac6fb20/coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8", size = 260445, upload-time = "2025-12-28T15:42:38.646Z" }, + { url = "https://files.pythonhosted.org/packages/49/23/184b22a00d9bb97488863ced9454068c79e413cb23f472da6cbddc6cfc52/coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486", size = 263357, upload-time = 
"2025-12-28T15:42:40.788Z" }, + { url = "https://files.pythonhosted.org/packages/7d/bd/58af54c0c9199ea4190284f389005779d7daf7bf3ce40dcd2d2b2f96da69/coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564", size = 260959, upload-time = "2025-12-28T15:42:42.808Z" }, + { url = "https://files.pythonhosted.org/packages/4b/2a/6839294e8f78a4891bf1df79d69c536880ba2f970d0ff09e7513d6e352e9/coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7", size = 259792, upload-time = "2025-12-28T15:42:44.818Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c3/528674d4623283310ad676c5af7414b9850ab6d55c2300e8aa4b945ec554/coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416", size = 262123, upload-time = "2025-12-28T15:42:47.108Z" }, + { url = "https://files.pythonhosted.org/packages/06/c5/8c0515692fb4c73ac379d8dc09b18eaf0214ecb76ea6e62467ba7a1556ff/coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f", size = 222562, upload-time = "2025-12-28T15:42:49.144Z" }, + { url = "https://files.pythonhosted.org/packages/05/0e/c0a0c4678cb30dac735811db529b321d7e1c9120b79bd728d4f4d6b010e9/coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79", size = 223670, upload-time = "2025-12-28T15:42:51.218Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5f/b177aa0011f354abf03a8f30a85032686d290fdeed4222b27d36b4372a50/coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4", size = 221707, upload-time = "2025-12-28T15:42:53.034Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/48/d9f421cb8da5afaa1a64570d9989e00fb7955e6acddc5a12979f7666ef60/coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573", size = 210722, upload-time = "2025-12-28T15:42:54.901Z" }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "librt" +version = "0.7.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/29/47f29026ca17f35cf299290292d5f8331f5077364974b7675a353179afa2/librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c", size = 145910, upload-time = "2026-01-01T23:52:22.87Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/84/2cfb1f3b9b60bab52e16a220c931223fc8e963d0d7bb9132bef012aafc3f/librt-0.7.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4836c5645f40fbdc275e5670819bde5ab5f2e882290d304e3c6ddab1576a6d0", size = 54709, upload-time = "2026-01-01T23:50:48.326Z" }, + { url = "https://files.pythonhosted.org/packages/19/a1/3127b277e9d3784a8040a54e8396d9ae5c64d6684dc6db4b4089b0eedcfb/librt-0.7.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae8aec43117a645a31e5f60e9e3a0797492e747823b9bda6972d521b436b4e8", size = 56658, upload-time = "2026-01-01T23:50:49.74Z" }, + { url 
= "https://files.pythonhosted.org/packages/3a/e9/b91b093a5c42eb218120445f3fef82e0b977fa2225f4d6fc133d25cdf86a/librt-0.7.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:aea05f701ccd2a76b34f0daf47ca5068176ff553510b614770c90d76ac88df06", size = 161026, upload-time = "2026-01-01T23:50:50.853Z" }, + { url = "https://files.pythonhosted.org/packages/c7/cb/1ded77d5976a79d7057af4a010d577ce4f473ff280984e68f4974a3281e5/librt-0.7.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b16ccaeff0ed4355dfb76fe1ea7a5d6d03b5ad27f295f77ee0557bc20a72495", size = 169529, upload-time = "2026-01-01T23:50:52.24Z" }, + { url = "https://files.pythonhosted.org/packages/da/6e/6ca5bdaa701e15f05000ac1a4c5d1475c422d3484bd3d1ca9e8c2f5be167/librt-0.7.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48c7e150c095d5e3cea7452347ba26094be905d6099d24f9319a8b475fcd3e0", size = 183271, upload-time = "2026-01-01T23:50:55.287Z" }, + { url = "https://files.pythonhosted.org/packages/e7/2d/55c0e38073997b4bbb5ddff25b6d1bbba8c2f76f50afe5bb9c844b702f34/librt-0.7.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4dcee2f921a8632636d1c37f1bbdb8841d15666d119aa61e5399c5268e7ce02e", size = 179039, upload-time = "2026-01-01T23:50:56.807Z" }, + { url = "https://files.pythonhosted.org/packages/33/4e/3662a41ae8bb81b226f3968426293517b271d34d4e9fd4b59fc511f1ae40/librt-0.7.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14ef0f4ac3728ffd85bfc58e2f2f48fb4ef4fa871876f13a73a7381d10a9f77c", size = 173505, upload-time = "2026-01-01T23:50:58.291Z" }, + { url = "https://files.pythonhosted.org/packages/f8/5d/cf768deb8bdcbac5f8c21fcb32dd483d038d88c529fd351bbe50590b945d/librt-0.7.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e4ab69fa37f8090f2d971a5d2bc606c7401170dbdae083c393d6cbf439cb45b8", size = 193570, upload-time = "2026-01-01T23:50:59.546Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/ea/ee70effd13f1d651976d83a2812391f6203971740705e3c0900db75d4bce/librt-0.7.7-cp310-cp310-win32.whl", hash = "sha256:4bf3cc46d553693382d2abf5f5bd493d71bb0f50a7c0beab18aa13a5545c8900", size = 42600, upload-time = "2026-01-01T23:51:00.694Z" }, + { url = "https://files.pythonhosted.org/packages/f0/eb/dc098730f281cba76c279b71783f5de2edcba3b880c1ab84a093ef826062/librt-0.7.7-cp310-cp310-win_amd64.whl", hash = "sha256:f0c8fe5aeadd8a0e5b0598f8a6ee3533135ca50fd3f20f130f9d72baf5c6ac58", size = 48977, upload-time = "2026-01-01T23:51:01.726Z" }, + { url = "https://files.pythonhosted.org/packages/f0/56/30b5c342518005546df78841cb0820ae85a17e7d07d521c10ef367306d0d/librt-0.7.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a487b71fbf8a9edb72a8c7a456dda0184642d99cd007bc819c0b7ab93676a8ee", size = 54709, upload-time = "2026-01-01T23:51:02.774Z" }, + { url = "https://files.pythonhosted.org/packages/72/78/9f120e3920b22504d4f3835e28b55acc2cc47c9586d2e1b6ba04c3c1bf01/librt-0.7.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4d4efb218264ecf0f8516196c9e2d1a0679d9fb3bb15df1155a35220062eba8", size = 56663, upload-time = "2026-01-01T23:51:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ea/7d7a1ee7dfc1151836028eba25629afcf45b56bbc721293e41aa2e9b8934/librt-0.7.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b8bb331aad734b059c4b450cd0a225652f16889e286b2345af5e2c3c625c3d85", size = 161705, upload-time = "2026-01-01T23:51:04.917Z" }, + { url = "https://files.pythonhosted.org/packages/45/a5/952bc840ac8917fbcefd6bc5f51ad02b89721729814f3e2bfcc1337a76d6/librt-0.7.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:467dbd7443bda08338fc8ad701ed38cef48194017554f4c798b0a237904b3f99", size = 171029, upload-time = "2026-01-01T23:51:06.09Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/bf/c017ff7da82dc9192cf40d5e802a48a25d00e7639b6465cfdcee5893a22c/librt-0.7.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50d1d1ee813d2d1a3baf2873634ba506b263032418d16287c92ec1cc9c1a00cb", size = 184704, upload-time = "2026-01-01T23:51:07.549Z" }, + { url = "https://files.pythonhosted.org/packages/77/ec/72f3dd39d2cdfd6402ab10836dc9cbf854d145226062a185b419c4f1624a/librt-0.7.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7e5070cf3ec92d98f57574da0224f8c73faf1ddd6d8afa0b8c9f6e86997bc74", size = 180719, upload-time = "2026-01-01T23:51:09.062Z" }, + { url = "https://files.pythonhosted.org/packages/78/86/06e7a1a81b246f3313bf515dd9613a1c81583e6fd7843a9f4d625c4e926d/librt-0.7.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bdb9f3d865b2dafe7f9ad7f30ef563c80d0ddd2fdc8cc9b8e4f242f475e34d75", size = 174537, upload-time = "2026-01-01T23:51:10.611Z" }, + { url = "https://files.pythonhosted.org/packages/83/08/f9fb2edc9c7a76e95b2924ce81d545673f5b034e8c5dd92159d1c7dae0c6/librt-0.7.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8185c8497d45164e256376f9da5aed2bb26ff636c798c9dabe313b90e9f25b28", size = 195238, upload-time = "2026-01-01T23:51:11.762Z" }, + { url = "https://files.pythonhosted.org/packages/ba/56/ea2d2489d3ea1f47b301120e03a099e22de7b32c93df9a211e6ff4f9bf38/librt-0.7.7-cp311-cp311-win32.whl", hash = "sha256:44d63ce643f34a903f09ff7ca355aae019a3730c7afd6a3c037d569beeb5d151", size = 42939, upload-time = "2026-01-01T23:51:13.192Z" }, + { url = "https://files.pythonhosted.org/packages/58/7b/c288f417e42ba2a037f1c0753219e277b33090ed4f72f292fb6fe175db4c/librt-0.7.7-cp311-cp311-win_amd64.whl", hash = "sha256:7d13cc340b3b82134f8038a2bfe7137093693dcad8ba5773da18f95ad6b77a8a", size = 49240, upload-time = "2026-01-01T23:51:14.264Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/24/738eb33a6c1516fdb2dfd2a35db6e5300f7616679b573585be0409bc6890/librt-0.7.7-cp311-cp311-win_arm64.whl", hash = "sha256:983de36b5a83fe9222f4f7dcd071f9b1ac6f3f17c0af0238dadfb8229588f890", size = 42613, upload-time = "2026-01-01T23:51:15.268Z" }, + { url = "https://files.pythonhosted.org/packages/56/72/1cd9d752070011641e8aee046c851912d5f196ecd726fffa7aed2070f3e0/librt-0.7.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a85a1fc4ed11ea0eb0a632459ce004a2d14afc085a50ae3463cd3dfe1ce43fc", size = 55687, upload-time = "2026-01-01T23:51:16.291Z" }, + { url = "https://files.pythonhosted.org/packages/50/aa/d5a1d4221c4fe7e76ae1459d24d6037783cb83c7645164c07d7daf1576ec/librt-0.7.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87654e29a35938baead1c4559858f346f4a2a7588574a14d784f300ffba0efd", size = 57136, upload-time = "2026-01-01T23:51:17.363Z" }, + { url = "https://files.pythonhosted.org/packages/23/6f/0c86b5cb5e7ef63208c8cc22534df10ecc5278efc0d47fb8815577f3ca2f/librt-0.7.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c9faaebb1c6212c20afd8043cd6ed9de0a47d77f91a6b5b48f4e46ed470703fe", size = 165320, upload-time = "2026-01-01T23:51:18.455Z" }, + { url = "https://files.pythonhosted.org/packages/16/37/df4652690c29f645ffe405b58285a4109e9fe855c5bb56e817e3e75840b3/librt-0.7.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1908c3e5a5ef86b23391448b47759298f87f997c3bd153a770828f58c2bb4630", size = 174216, upload-time = "2026-01-01T23:51:19.599Z" }, + { url = "https://files.pythonhosted.org/packages/9a/d6/d3afe071910a43133ec9c0f3e4ce99ee6df0d4e44e4bddf4b9e1c6ed41cc/librt-0.7.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbc4900e95a98fc0729523be9d93a8fedebb026f32ed9ffc08acd82e3e181503", size = 189005, upload-time = "2026-01-01T23:51:21.052Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/18/74060a870fe2d9fd9f47824eba6717ce7ce03124a0d1e85498e0e7efc1b2/librt-0.7.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7ea4e1fbd253e5c68ea0fe63d08577f9d288a73f17d82f652ebc61fa48d878d", size = 183961, upload-time = "2026-01-01T23:51:22.493Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5e/918a86c66304af66a3c1d46d54df1b2d0b8894babc42a14fb6f25511497f/librt-0.7.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ef7699b7a5a244b1119f85c5bbc13f152cd38240cbb2baa19b769433bae98e50", size = 177610, upload-time = "2026-01-01T23:51:23.874Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d7/b5e58dc2d570f162e99201b8c0151acf40a03a39c32ab824dd4febf12736/librt-0.7.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:955c62571de0b181d9e9e0a0303c8bc90d47670a5eff54cf71bf5da61d1899cf", size = 199272, upload-time = "2026-01-01T23:51:25.341Z" }, + { url = "https://files.pythonhosted.org/packages/18/87/8202c9bd0968bdddc188ec3811985f47f58ed161b3749299f2c0dd0f63fb/librt-0.7.7-cp312-cp312-win32.whl", hash = "sha256:1bcd79be209313b270b0e1a51c67ae1af28adad0e0c7e84c3ad4b5cb57aaa75b", size = 43189, upload-time = "2026-01-01T23:51:26.799Z" }, + { url = "https://files.pythonhosted.org/packages/61/8d/80244b267b585e7aa79ffdac19f66c4861effc3a24598e77909ecdd0850e/librt-0.7.7-cp312-cp312-win_amd64.whl", hash = "sha256:4353ee891a1834567e0302d4bd5e60f531912179578c36f3d0430f8c5e16b456", size = 49462, upload-time = "2026-01-01T23:51:27.813Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1f/75db802d6a4992d95e8a889682601af9b49d5a13bbfa246d414eede1b56c/librt-0.7.7-cp312-cp312-win_arm64.whl", hash = "sha256:a76f1d679beccccdf8c1958e732a1dfcd6e749f8821ee59d7bec009ac308c029", size = 42828, upload-time = "2026-01-01T23:51:28.804Z" }, + { url = "https://files.pythonhosted.org/packages/8d/5e/d979ccb0a81407ec47c14ea68fb217ff4315521730033e1dd9faa4f3e2c1/librt-0.7.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:8f4a0b0a3c86ba9193a8e23bb18f100d647bf192390ae195d84dfa0a10fb6244", size = 55746, upload-time = "2026-01-01T23:51:29.828Z" }, + { url = "https://files.pythonhosted.org/packages/f5/2c/3b65861fb32f802c3783d6ac66fc5589564d07452a47a8cf9980d531cad3/librt-0.7.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5335890fea9f9e6c4fdf8683061b9ccdcbe47c6dc03ab8e9b68c10acf78be78d", size = 57174, upload-time = "2026-01-01T23:51:31.226Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/030b50614b29e443607220097ebaf438531ea218c7a9a3e21ea862a919cd/librt-0.7.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b4346b1225be26def3ccc6c965751c74868f0578cbcba293c8ae9168483d811", size = 165834, upload-time = "2026-01-01T23:51:32.278Z" }, + { url = "https://files.pythonhosted.org/packages/5d/e1/bd8d1eacacb24be26a47f157719553bbd1b3fe812c30dddf121c0436fd0b/librt-0.7.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a10b8eebdaca6e9fdbaf88b5aefc0e324b763a5f40b1266532590d5afb268a4c", size = 174819, upload-time = "2026-01-01T23:51:33.461Z" }, + { url = "https://files.pythonhosted.org/packages/46/7d/91d6c3372acf54a019c1ad8da4c9ecf4fc27d039708880bf95f48dbe426a/librt-0.7.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:067be973d90d9e319e6eb4ee2a9b9307f0ecd648b8a9002fa237289a4a07a9e7", size = 189607, upload-time = "2026-01-01T23:51:34.604Z" }, + { url = "https://files.pythonhosted.org/packages/fa/ac/44604d6d3886f791fbd1c6ae12d5a782a8f4aca927484731979f5e92c200/librt-0.7.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:23d2299ed007812cccc1ecef018db7d922733382561230de1f3954db28433977", size = 184586, upload-time = "2026-01-01T23:51:35.845Z" }, + { url = "https://files.pythonhosted.org/packages/5c/26/d8a6e4c17117b7f9b83301319d9a9de862ae56b133efb4bad8b3aa0808c9/librt-0.7.7-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:6b6f8ea465524aa4c7420c7cc4ca7d46fe00981de8debc67b1cc2e9957bb5b9d", size = 178251, upload-time = "2026-01-01T23:51:37.018Z" }, + { url = "https://files.pythonhosted.org/packages/99/ab/98d857e254376f8e2f668e807daccc1f445e4b4fc2f6f9c1cc08866b0227/librt-0.7.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8df32a99cc46eb0ee90afd9ada113ae2cafe7e8d673686cf03ec53e49635439", size = 199853, upload-time = "2026-01-01T23:51:38.195Z" }, + { url = "https://files.pythonhosted.org/packages/7c/55/4523210d6ae5134a5da959900be43ad8bab2e4206687b6620befddb5b5fd/librt-0.7.7-cp313-cp313-win32.whl", hash = "sha256:86f86b3b785487c7760247bcdac0b11aa8bf13245a13ed05206286135877564b", size = 43247, upload-time = "2026-01-01T23:51:39.629Z" }, + { url = "https://files.pythonhosted.org/packages/25/40/3ec0fed5e8e9297b1cf1a3836fb589d3de55f9930e3aba988d379e8ef67c/librt-0.7.7-cp313-cp313-win_amd64.whl", hash = "sha256:4862cb2c702b1f905c0503b72d9d4daf65a7fdf5a9e84560e563471e57a56949", size = 49419, upload-time = "2026-01-01T23:51:40.674Z" }, + { url = "https://files.pythonhosted.org/packages/1c/7a/aab5f0fb122822e2acbc776addf8b9abfb4944a9056c00c393e46e543177/librt-0.7.7-cp313-cp313-win_arm64.whl", hash = "sha256:0996c83b1cb43c00e8c87835a284f9057bc647abd42b5871e5f941d30010c832", size = 42828, upload-time = "2026-01-01T23:51:41.731Z" }, + { url = "https://files.pythonhosted.org/packages/69/9c/228a5c1224bd23809a635490a162e9cbdc68d99f0eeb4a696f07886b8206/librt-0.7.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:23daa1ab0512bafdd677eb1bfc9611d8ffbe2e328895671e64cb34166bc1b8c8", size = 55188, upload-time = "2026-01-01T23:51:43.14Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c2/0e7c6067e2b32a156308205e5728f4ed6478c501947e9142f525afbc6bd2/librt-0.7.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:558a9e5a6f3cc1e20b3168fb1dc802d0d8fa40731f6e9932dcc52bbcfbd37111", size = 56895, upload-time = "2026-01-01T23:51:44.534Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/77/de50ff70c80855eb79d1d74035ef06f664dd073fb7fb9d9fb4429651b8eb/librt-0.7.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2567cb48dc03e5b246927ab35cbb343376e24501260a9b5e30b8e255dca0d1d2", size = 163724, upload-time = "2026-01-01T23:51:45.571Z" }, + { url = "https://files.pythonhosted.org/packages/6e/19/f8e4bf537899bdef9e0bb9f0e4b18912c2d0f858ad02091b6019864c9a6d/librt-0.7.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6066c638cdf85ff92fc6f932d2d73c93a0e03492cdfa8778e6d58c489a3d7259", size = 172470, upload-time = "2026-01-01T23:51:46.823Z" }, + { url = "https://files.pythonhosted.org/packages/42/4c/dcc575b69d99076768e8dd6141d9aecd4234cba7f0e09217937f52edb6ed/librt-0.7.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a609849aca463074c17de9cda173c276eb8fee9e441053529e7b9e249dc8b8ee", size = 186806, upload-time = "2026-01-01T23:51:48.009Z" }, + { url = "https://files.pythonhosted.org/packages/fe/f8/4094a2b7816c88de81239a83ede6e87f1138477d7ee956c30f136009eb29/librt-0.7.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:add4e0a000858fe9bb39ed55f31085506a5c38363e6eb4a1e5943a10c2bfc3d1", size = 181809, upload-time = "2026-01-01T23:51:49.35Z" }, + { url = "https://files.pythonhosted.org/packages/1b/ac/821b7c0ab1b5a6cd9aee7ace8309c91545a2607185101827f79122219a7e/librt-0.7.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a3bfe73a32bd0bdb9a87d586b05a23c0a1729205d79df66dee65bb2e40d671ba", size = 175597, upload-time = "2026-01-01T23:51:50.636Z" }, + { url = "https://files.pythonhosted.org/packages/71/f9/27f6bfbcc764805864c04211c6ed636fe1d58f57a7b68d1f4ae5ed74e0e0/librt-0.7.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0ecce0544d3db91a40f8b57ae26928c02130a997b540f908cefd4d279d6c5848", size = 196506, upload-time = "2026-01-01T23:51:52.535Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/ba/c9b9c6fc931dd7ea856c573174ccaf48714905b1a7499904db2552e3bbaf/librt-0.7.7-cp314-cp314-win32.whl", hash = "sha256:8f7a74cf3a80f0c3b0ec75b0c650b2f0a894a2cec57ef75f6f72c1e82cdac61d", size = 39747, upload-time = "2026-01-01T23:51:53.683Z" }, + { url = "https://files.pythonhosted.org/packages/c5/69/cd1269337c4cde3ee70176ee611ab0058aa42fc8ce5c9dce55f48facfcd8/librt-0.7.7-cp314-cp314-win_amd64.whl", hash = "sha256:3d1fe2e8df3268dd6734dba33ededae72ad5c3a859b9577bc00b715759c5aaab", size = 45971, upload-time = "2026-01-01T23:51:54.697Z" }, + { url = "https://files.pythonhosted.org/packages/79/fd/e0844794423f5583108c5991313c15e2b400995f44f6ec6871f8aaf8243c/librt-0.7.7-cp314-cp314-win_arm64.whl", hash = "sha256:2987cf827011907d3dfd109f1be0d61e173d68b1270107bb0e89f2fca7f2ed6b", size = 39075, upload-time = "2026-01-01T23:51:55.726Z" }, + { url = "https://files.pythonhosted.org/packages/42/02/211fd8f7c381e7b2a11d0fdfcd410f409e89967be2e705983f7c6342209a/librt-0.7.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8e92c8de62b40bfce91d5e12c6e8b15434da268979b1af1a6589463549d491e6", size = 57368, upload-time = "2026-01-01T23:51:56.706Z" }, + { url = "https://files.pythonhosted.org/packages/4c/b6/aca257affae73ece26041ae76032153266d110453173f67d7603058e708c/librt-0.7.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f683dcd49e2494a7535e30f779aa1ad6e3732a019d80abe1309ea91ccd3230e3", size = 59238, upload-time = "2026-01-01T23:51:58.066Z" }, + { url = "https://files.pythonhosted.org/packages/96/47/7383a507d8e0c11c78ca34c9d36eab9000db5989d446a2f05dc40e76c64f/librt-0.7.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b15e5d17812d4d629ff576699954f74e2cc24a02a4fc401882dd94f81daba45", size = 183870, upload-time = "2026-01-01T23:51:59.204Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/b8/50f3d8eec8efdaf79443963624175c92cec0ba84827a66b7fcfa78598e51/librt-0.7.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c084841b879c4d9b9fa34e5d5263994f21aea7fd9c6add29194dbb41a6210536", size = 194608, upload-time = "2026-01-01T23:52:00.419Z" }, + { url = "https://files.pythonhosted.org/packages/23/d9/1b6520793aadb59d891e3b98ee057a75de7f737e4a8b4b37fdbecb10d60f/librt-0.7.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c8fb9966f84737115513fecbaf257f9553d067a7dd45a69c2c7e5339e6a8dc", size = 206776, upload-time = "2026-01-01T23:52:01.705Z" }, + { url = "https://files.pythonhosted.org/packages/ff/db/331edc3bba929d2756fa335bfcf736f36eff4efcb4f2600b545a35c2ae58/librt-0.7.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9b5fb1ecb2c35362eab2dbd354fd1efa5a8440d3e73a68be11921042a0edc0ff", size = 203206, upload-time = "2026-01-01T23:52:03.315Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e1/6af79ec77204e85f6f2294fc171a30a91bb0e35d78493532ed680f5d98be/librt-0.7.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:d1454899909d63cc9199a89fcc4f81bdd9004aef577d4ffc022e600c412d57f3", size = 196697, upload-time = "2026-01-01T23:52:04.857Z" }, + { url = "https://files.pythonhosted.org/packages/f3/46/de55ecce4b2796d6d243295c221082ca3a944dc2fb3a52dcc8660ce7727d/librt-0.7.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7ef28f2e7a016b29792fe0a2dd04dec75725b32a1264e390c366103f834a9c3a", size = 217193, upload-time = "2026-01-01T23:52:06.159Z" }, + { url = "https://files.pythonhosted.org/packages/41/61/33063e271949787a2f8dd33c5260357e3d512a114fc82ca7890b65a76e2d/librt-0.7.7-cp314-cp314t-win32.whl", hash = "sha256:5e419e0db70991b6ba037b70c1d5bbe92b20ddf82f31ad01d77a347ed9781398", size = 40277, upload-time = "2026-01-01T23:52:07.625Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/21/1abd972349f83a696ea73159ac964e63e2d14086fdd9bc7ca878c25fced4/librt-0.7.7-cp314-cp314t-win_amd64.whl", hash = "sha256:d6b7d93657332c817b8d674ef6bf1ab7796b4f7ce05e420fd45bd258a72ac804", size = 46765, upload-time = "2026-01-01T23:52:08.647Z" }, + { url = "https://files.pythonhosted.org/packages/51/0e/b756c7708143a63fca65a51ca07990fa647db2cc8fcd65177b9e96680255/librt-0.7.7-cp314-cp314t-win_arm64.whl", hash = "sha256:142c2cd91794b79fd0ce113bd658993b7ede0fe93057668c2f98a45ca00b7e91", size = 39724, upload-time = "2026-01-01T23:52:09.745Z" }, +] + +[[package]] +name = "mypy" +version = "1.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/63/e499890d8e39b1ff2df4c0c6ce5d371b6844ee22b8250687a99fd2f657a8/mypy-1.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f05aa3d375b385734388e844bc01733bd33c644ab48e9684faa54e5389775ec", size = 13101333, upload-time = "2025-12-15T05:03:03.28Z" }, + { url = "https://files.pythonhosted.org/packages/72/4b/095626fc136fba96effc4fd4a82b41d688ab92124f8c4f7564bffe5cf1b0/mypy-1.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:022ea7279374af1a5d78dfcab853fe6a536eebfda4b59deab53cd21f6cd9f00b", size = 12164102, upload-time = "2025-12-15T05:02:33.611Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/5b/952928dd081bf88a83a5ccd49aaecfcd18fd0d2710c7ff07b8fb6f7032b9/mypy-1.19.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee4c11e460685c3e0c64a4c5de82ae143622410950d6be863303a1c4ba0e36d6", size = 12765799, upload-time = "2025-12-15T05:03:28.44Z" }, + { url = "https://files.pythonhosted.org/packages/2a/0d/93c2e4a287f74ef11a66fb6d49c7a9f05e47b0a4399040e6719b57f500d2/mypy-1.19.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de759aafbae8763283b2ee5869c7255391fbc4de3ff171f8f030b5ec48381b74", size = 13522149, upload-time = "2025-12-15T05:02:36.011Z" }, + { url = "https://files.pythonhosted.org/packages/7b/0e/33a294b56aaad2b338d203e3a1d8b453637ac36cb278b45005e0901cf148/mypy-1.19.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ab43590f9cd5108f41aacf9fca31841142c786827a74ab7cc8a2eacb634e09a1", size = 13810105, upload-time = "2025-12-15T05:02:40.327Z" }, + { url = "https://files.pythonhosted.org/packages/0e/fd/3e82603a0cb66b67c5e7abababce6bf1a929ddf67bf445e652684af5c5a0/mypy-1.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:2899753e2f61e571b3971747e302d5f420c3fd09650e1951e99f823bc3089dac", size = 10057200, upload-time = "2025-12-15T05:02:51.012Z" }, + { url = "https://files.pythonhosted.org/packages/ef/47/6b3ebabd5474d9cdc170d1342fbf9dddc1b0ec13ec90bf9004ee6f391c31/mypy-1.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d8dfc6ab58ca7dda47d9237349157500468e404b17213d44fc1cb77bce532288", size = 13028539, upload-time = "2025-12-15T05:03:44.129Z" }, + { url = "https://files.pythonhosted.org/packages/5c/a6/ac7c7a88a3c9c54334f53a941b765e6ec6c4ebd65d3fe8cdcfbe0d0fd7db/mypy-1.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e3f276d8493c3c97930e354b2595a44a21348b320d859fb4a2b9f66da9ed27ab", size = 12083163, upload-time = "2025-12-15T05:03:37.679Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/af/3afa9cf880aa4a2c803798ac24f1d11ef72a0c8079689fac5cfd815e2830/mypy-1.19.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2abb24cf3f17864770d18d673c85235ba52456b36a06b6afc1e07c1fdcd3d0e6", size = 12687629, upload-time = "2025-12-15T05:02:31.526Z" }, + { url = "https://files.pythonhosted.org/packages/2d/46/20f8a7114a56484ab268b0ab372461cb3a8f7deed31ea96b83a4e4cfcfca/mypy-1.19.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a009ffa5a621762d0c926a078c2d639104becab69e79538a494bcccb62cc0331", size = 13436933, upload-time = "2025-12-15T05:03:15.606Z" }, + { url = "https://files.pythonhosted.org/packages/5b/f8/33b291ea85050a21f15da910002460f1f445f8007adb29230f0adea279cb/mypy-1.19.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f7cee03c9a2e2ee26ec07479f38ea9c884e301d42c6d43a19d20fb014e3ba925", size = 13661754, upload-time = "2025-12-15T05:02:26.731Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a3/47cbd4e85bec4335a9cd80cf67dbc02be21b5d4c9c23ad6b95d6c5196bac/mypy-1.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:4b84a7a18f41e167f7995200a1d07a4a6810e89d29859df936f1c3923d263042", size = 10055772, upload-time = "2025-12-15T05:03:26.179Z" }, + { url = "https://files.pythonhosted.org/packages/06/8a/19bfae96f6615aa8a0604915512e0289b1fad33d5909bf7244f02935d33a/mypy-1.19.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8174a03289288c1f6c46d55cef02379b478bfbc8e358e02047487cad44c6ca1", size = 13206053, upload-time = "2025-12-15T05:03:46.622Z" }, + { url = "https://files.pythonhosted.org/packages/a5/34/3e63879ab041602154ba2a9f99817bb0c85c4df19a23a1443c8986e4d565/mypy-1.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ffcebe56eb09ff0c0885e750036a095e23793ba6c2e894e7e63f6d89ad51f22e", size = 12219134, upload-time = "2025-12-15T05:03:24.367Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/cc/2db6f0e95366b630364e09845672dbee0cbf0bbe753a204b29a944967cd9/mypy-1.19.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b64d987153888790bcdb03a6473d321820597ab8dd9243b27a92153c4fa50fd2", size = 12731616, upload-time = "2025-12-15T05:02:44.725Z" }, + { url = "https://files.pythonhosted.org/packages/00/be/dd56c1fd4807bc1eba1cf18b2a850d0de7bacb55e158755eb79f77c41f8e/mypy-1.19.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c35d298c2c4bba75feb2195655dfea8124d855dfd7343bf8b8c055421eaf0cf8", size = 13620847, upload-time = "2025-12-15T05:03:39.633Z" }, + { url = "https://files.pythonhosted.org/packages/6d/42/332951aae42b79329f743bf1da088cd75d8d4d9acc18fbcbd84f26c1af4e/mypy-1.19.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34c81968774648ab5ac09c29a375fdede03ba253f8f8287847bd480782f73a6a", size = 13834976, upload-time = "2025-12-15T05:03:08.786Z" }, + { url = "https://files.pythonhosted.org/packages/6f/63/e7493e5f90e1e085c562bb06e2eb32cae27c5057b9653348d38b47daaecc/mypy-1.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b10e7c2cd7870ba4ad9b2d8a6102eb5ffc1f16ca35e3de6bfa390c1113029d13", size = 10118104, upload-time = "2025-12-15T05:03:10.834Z" }, + { url = "https://files.pythonhosted.org/packages/de/9f/a6abae693f7a0c697dbb435aac52e958dc8da44e92e08ba88d2e42326176/mypy-1.19.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e3157c7594ff2ef1634ee058aafc56a82db665c9438fd41b390f3bde1ab12250", size = 13201927, upload-time = "2025-12-15T05:02:29.138Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a4/45c35ccf6e1c65afc23a069f50e2c66f46bd3798cbe0d680c12d12935caa/mypy-1.19.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdb12f69bcc02700c2b47e070238f42cb87f18c0bc1fc4cdb4fb2bc5fd7a3b8b", size = 12206730, upload-time = "2025-12-15T05:03:01.325Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/bb/cdcf89678e26b187650512620eec8368fded4cfd99cfcb431e4cdfd19dec/mypy-1.19.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f859fb09d9583a985be9a493d5cfc5515b56b08f7447759a0c5deaf68d80506e", size = 12724581, upload-time = "2025-12-15T05:03:20.087Z" }, + { url = "https://files.pythonhosted.org/packages/d1/32/dd260d52babf67bad8e6770f8e1102021877ce0edea106e72df5626bb0ec/mypy-1.19.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9a6538e0415310aad77cb94004ca6482330fece18036b5f360b62c45814c4ef", size = 13616252, upload-time = "2025-12-15T05:02:49.036Z" }, + { url = "https://files.pythonhosted.org/packages/71/d0/5e60a9d2e3bd48432ae2b454b7ef2b62a960ab51292b1eda2a95edd78198/mypy-1.19.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:da4869fc5e7f62a88f3fe0b5c919d1d9f7ea3cef92d3689de2823fd27e40aa75", size = 13840848, upload-time = "2025-12-15T05:02:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/d32051fa65ecf6cc8c6610956473abdc9b4c43301107476ac03559507843/mypy-1.19.1-cp313-cp313-win_amd64.whl", hash = "sha256:016f2246209095e8eda7538944daa1d60e1e8134d98983b9fc1e92c1fc0cb8dd", size = 10135510, upload-time = "2025-12-15T05:02:58.438Z" }, + { url = "https://files.pythonhosted.org/packages/de/eb/b83e75f4c820c4247a58580ef86fcd35165028f191e7e1ba57128c52782d/mypy-1.19.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:06e6170bd5836770e8104c8fdd58e5e725cfeb309f0a6c681a811f557e97eac1", size = 13199744, upload-time = "2025-12-15T05:03:30.823Z" }, + { url = "https://files.pythonhosted.org/packages/94/28/52785ab7bfa165f87fcbb61547a93f98bb20e7f82f90f165a1f69bce7b3d/mypy-1.19.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:804bd67b8054a85447c8954215a906d6eff9cabeabe493fb6334b24f4bfff718", size = 12215815, upload-time = "2025-12-15T05:02:42.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/c6/bdd60774a0dbfb05122e3e925f2e9e846c009e479dcec4821dad881f5b52/mypy-1.19.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:21761006a7f497cb0d4de3d8ef4ca70532256688b0523eee02baf9eec895e27b", size = 12740047, upload-time = "2025-12-15T05:03:33.168Z" }, + { url = "https://files.pythonhosted.org/packages/32/2a/66ba933fe6c76bd40d1fe916a83f04fed253152f451a877520b3c4a5e41e/mypy-1.19.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28902ee51f12e0f19e1e16fbe2f8f06b6637f482c459dd393efddd0ec7f82045", size = 13601998, upload-time = "2025-12-15T05:03:13.056Z" }, + { url = "https://files.pythonhosted.org/packages/e3/da/5055c63e377c5c2418760411fd6a63ee2b96cf95397259038756c042574f/mypy-1.19.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:481daf36a4c443332e2ae9c137dfee878fcea781a2e3f895d54bd3002a900957", size = 13807476, upload-time = "2025-12-15T05:03:17.977Z" }, + { url = "https://files.pythonhosted.org/packages/cd/09/4ebd873390a063176f06b0dbf1f7783dd87bd120eae7727fa4ae4179b685/mypy-1.19.1-cp314-cp314-win_amd64.whl", hash = "sha256:8bb5c6f6d043655e055be9b542aa5f3bdd30e4f3589163e85f93f3640060509f", size = 10281872, upload-time = "2025-12-15T05:03:05.549Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f4/4ce9a05ce5ded1de3ec1c1d96cf9f9504a04e54ce0ed55cfa38619a32b8d/mypy-1.19.1-py3-none-any.whl", hash = "sha256:f1235f5ea01b7db5468d53ece6aaddf1ad0b88d9e7462b86ef96fe04995d7247", size = 2471239, upload-time = "2025-12-15T05:03:07.248Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = 
"2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "omophub" +source = { editable = "." } +dependencies = [ + { name = "httpx" }, + { name = "typing-extensions" }, +] + +[package.optional-dependencies] +dev = [ + { name = "mypy" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-cov" }, + { name = "python-dotenv" }, + { name = "respx" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "httpx", specifier = ">=0.27.0" }, + { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.8.0" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0.0" }, + { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.23.0" }, + { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.1.0" }, + { name = "python-dotenv", marker = "extra == 'dev'", specifier = ">=1.0.0" }, + { name = "respx", marker = "extra == 'dev'", specifier = ">=0.21.0" }, + { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.3.0" }, + { name = "typing-extensions", specifier = ">=4.5.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = 
"sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pytest" +version = "9.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, +] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "respx" +version = "0.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/f4/7c/96bd0bc759cf009675ad1ee1f96535edcb11e9666b985717eb8c87192a95/respx-0.22.0.tar.gz", hash = "sha256:3c8924caa2a50bd71aefc07aa812f2466ff489f1848c96e954a5362d17095d91", size = 28439, upload-time = "2024-12-19T22:33:59.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/67/afbb0978d5399bc9ea200f1d4489a23c9a1dad4eee6376242b8182389c79/respx-0.22.0-py2.py3-none-any.whl", hash = "sha256:631128d4c9aba15e56903fb5f66fb1eff412ce28dd387ca3a81339e52dbd3ad0", size = 25127, upload-time = "2024-12-19T22:33:57.837Z" }, +] + +[[package]] +name = "ruff" +version = "0.14.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/08/52232a877978dd8f9cf2aeddce3e611b40a63287dfca29b6b8da791f5e8d/ruff-0.14.10.tar.gz", hash = "sha256:9a2e830f075d1a42cd28420d7809ace390832a490ed0966fe373ba288e77aaf4", size = 5859763, upload-time = "2025-12-18T19:28:57.98Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/01/933704d69f3f05ee16ef11406b78881733c186fe14b6a46b05cfcaf6d3b2/ruff-0.14.10-py3-none-linux_armv6l.whl", hash = "sha256:7a3ce585f2ade3e1f29ec1b92df13e3da262178df8c8bdf876f48fa0e8316c49", size = 13527080, upload-time = "2025-12-18T19:29:25.642Z" }, + { url = "https://files.pythonhosted.org/packages/df/58/a0349197a7dfa603ffb7f5b0470391efa79ddc327c1e29c4851e85b09cc5/ruff-0.14.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:674f9be9372907f7257c51f1d4fc902cb7cf014b9980152b802794317941f08f", size = 13797320, upload-time = "2025-12-18T19:29:02.571Z" }, + { url = "https://files.pythonhosted.org/packages/7b/82/36be59f00a6082e38c23536df4e71cdbc6af8d7c707eade97fcad5c98235/ruff-0.14.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d85713d522348837ef9df8efca33ccb8bd6fcfc86a2cde3ccb4bc9d28a18003d", size = 12918434, upload-time = "2025-12-18T19:28:51.202Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/00/45c62a7f7e34da92a25804f813ebe05c88aa9e0c25e5cb5a7d23dd7450e3/ruff-0.14.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6987ebe0501ae4f4308d7d24e2d0fe3d7a98430f5adfd0f1fead050a740a3a77", size = 13371961, upload-time = "2025-12-18T19:29:04.991Z" }, + { url = "https://files.pythonhosted.org/packages/40/31/a5906d60f0405f7e57045a70f2d57084a93ca7425f22e1d66904769d1628/ruff-0.14.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16a01dfb7b9e4eee556fbfd5392806b1b8550c9b4a9f6acd3dbe6812b193c70a", size = 13275629, upload-time = "2025-12-18T19:29:21.381Z" }, + { url = "https://files.pythonhosted.org/packages/3e/60/61c0087df21894cf9d928dc04bcd4fb10e8b2e8dca7b1a276ba2155b2002/ruff-0.14.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7165d31a925b7a294465fa81be8c12a0e9b60fb02bf177e79067c867e71f8b1f", size = 14029234, upload-time = "2025-12-18T19:29:00.132Z" }, + { url = "https://files.pythonhosted.org/packages/44/84/77d911bee3b92348b6e5dab5a0c898d87084ea03ac5dc708f46d88407def/ruff-0.14.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c561695675b972effb0c0a45db233f2c816ff3da8dcfbe7dfc7eed625f218935", size = 15449890, upload-time = "2025-12-18T19:28:53.573Z" }, + { url = "https://files.pythonhosted.org/packages/e9/36/480206eaefa24a7ec321582dda580443a8f0671fdbf6b1c80e9c3e93a16a/ruff-0.14.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb98fcbbc61725968893682fd4df8966a34611239c9fd07a1f6a07e7103d08e", size = 15123172, upload-time = "2025-12-18T19:29:23.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/38/68e414156015ba80cef5473d57919d27dfb62ec804b96180bafdeaf0e090/ruff-0.14.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f24b47993a9d8cb858429e97bdf8544c78029f09b520af615c1d261bf827001d", size = 14460260, upload-time = "2025-12-18T19:29:27.808Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/19/9e050c0dca8aba824d67cc0db69fb459c28d8cd3f6855b1405b3f29cc91d/ruff-0.14.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59aabd2e2c4fd614d2862e7939c34a532c04f1084476d6833dddef4afab87e9f", size = 14229978, upload-time = "2025-12-18T19:29:11.32Z" }, + { url = "https://files.pythonhosted.org/packages/51/eb/e8dd1dd6e05b9e695aa9dd420f4577debdd0f87a5ff2fedda33c09e9be8c/ruff-0.14.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:213db2b2e44be8625002dbea33bb9c60c66ea2c07c084a00d55732689d697a7f", size = 14338036, upload-time = "2025-12-18T19:29:09.184Z" }, + { url = "https://files.pythonhosted.org/packages/6a/12/f3e3a505db7c19303b70af370d137795fcfec136d670d5de5391e295c134/ruff-0.14.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b914c40ab64865a17a9a5b67911d14df72346a634527240039eb3bd650e5979d", size = 13264051, upload-time = "2025-12-18T19:29:13.431Z" }, + { url = "https://files.pythonhosted.org/packages/08/64/8c3a47eaccfef8ac20e0484e68e0772013eb85802f8a9f7603ca751eb166/ruff-0.14.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1484983559f026788e3a5c07c81ef7d1e97c1c78ed03041a18f75df104c45405", size = 13283998, upload-time = "2025-12-18T19:29:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/12/84/534a5506f4074e5cc0529e5cd96cfc01bb480e460c7edf5af70d2bcae55e/ruff-0.14.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c70427132db492d25f982fffc8d6c7535cc2fd2c83fc8888f05caaa248521e60", size = 13601891, upload-time = "2025-12-18T19:28:55.811Z" }, + { url = "https://files.pythonhosted.org/packages/0d/1e/14c916087d8598917dbad9b2921d340f7884824ad6e9c55de948a93b106d/ruff-0.14.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5bcf45b681e9f1ee6445d317ce1fa9d6cba9a6049542d1c3d5b5958986be8830", size = 14336660, upload-time = "2025-12-18T19:29:16.531Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/1c/d7b67ab43f30013b47c12b42d1acd354c195351a3f7a1d67f59e54227ede/ruff-0.14.10-py3-none-win32.whl", hash = "sha256:104c49fc7ab73f3f3a758039adea978869a918f31b73280db175b43a2d9b51d6", size = 13196187, upload-time = "2025-12-18T19:29:19.006Z" }, + { url = "https://files.pythonhosted.org/packages/fb/9c/896c862e13886fae2af961bef3e6312db9ebc6adc2b156fe95e615dee8c1/ruff-0.14.10-py3-none-win_amd64.whl", hash = "sha256:466297bd73638c6bdf06485683e812db1c00c7ac96d4ddd0294a338c62fdc154", size = 14661283, upload-time = "2025-12-18T19:29:30.16Z" }, + { url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = 
"https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +]