From 052b86bd294dbb886f40649a6792f775cd8b99ca Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 13:57:56 -0800 Subject: [PATCH 01/26] Fix missing warning method in ProgressReporter Add warning() method to ProgressReporter class to fix AttributeError when activities_iterator.py calls self.progress.warning() during sync. --- src/garmy/localdb/progress.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/garmy/localdb/progress.py b/src/garmy/localdb/progress.py index 208530c..4f1125e 100644 --- a/src/garmy/localdb/progress.py +++ b/src/garmy/localdb/progress.py @@ -56,7 +56,11 @@ def task_failed(self, task: str, sync_date: date): def info(self, message: str): """Log info message.""" self.logger.info(message) - + + def warning(self, message: str): + """Log warning message.""" + self.logger.warning(message) + def error(self, message: str): """Log error message.""" self.logger.error(message) From aeba69d12ba0443d426f7aef5ae3ef457aad4e3d Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 13:58:46 -0800 Subject: [PATCH 02/26] Fix MFA prompt not triggering during sync login Pass prompt_mfa callback to AuthClient.login() so users with 2FA enabled are prompted for their MFA code. Previously, prompt_mfa defaulted to None which caused login to silently fail for 2FA accounts. 
--- src/garmy/localdb/sync.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index e71bbe6..a388583 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -43,7 +43,12 @@ def initialize(self, email: str, password: str): from garmy import AuthClient, APIClient auth_client = AuthClient() - auth_client.login(email, password) + auth_client.login( + email, + password, + prompt_mfa=lambda: input("MFA code: "), + ) + self.api_client = APIClient(auth_client=auth_client) self.activities_iterator = ActivitiesIterator( From b6c97e4b7736beebf7fd467dcee5841fcc825dae Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 14:25:56 -0800 Subject: [PATCH 03/26] Fix NULL value constraint failure in timeseries storage --- src/garmy/localdb/db.py | 3 +++ src/garmy/localdb/extractors.py | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index 46c3868..1e2c324 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -67,6 +67,9 @@ def store_timeseries_batch(self, user_id: int, metric_type: MetricType, data: Li """Store batch of timeseries data.""" with self.get_session() as session: for timestamp, value, metadata in data: + # Skip entries with None values (NOT NULL constraint) + if value is None: + continue timeseries = TimeSeries( user_id=user_id, metric_type=metric_type.value, diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index c867863..9bdff81 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -189,6 +189,8 @@ def extract_timeseries_data(self, data: Any, metric_type: MetricType) -> List[Tu if metric_type == MetricType.BODY_BATTERY: if hasattr(data, 'body_battery_readings') and data.body_battery_readings: for reading in data.body_battery_readings: + if reading.level is None: + continue metadata = { 'status': getattr(reading, 
'status', None), 'version': getattr(reading, 'version', None) @@ -198,6 +200,8 @@ def extract_timeseries_data(self, data: Any, metric_type: MetricType) -> List[Tu elif metric_type == MetricType.STRESS: if hasattr(data, 'stress_readings') and data.stress_readings: for reading in data.stress_readings: + if reading.stress_level is None: + continue metadata = {} if hasattr(reading, 'stress_category'): metadata['stress_category'] = reading.stress_category @@ -208,7 +212,8 @@ def extract_timeseries_data(self, data: Any, metric_type: MetricType) -> List[Tu for reading in data.heart_rate_values_array: if isinstance(reading, (list, tuple)) and len(reading) >= 2: timestamp, heart_rate = reading[0], reading[1] - timeseries_data.append((timestamp, heart_rate, {})) + if heart_rate is not None: + timeseries_data.append((timestamp, heart_rate, {})) elif metric_type == MetricType.RESPIRATION: # Respiration might have different format - check if it has readings From 7623cc76dba930c03ff4ad653b91390f1c3b201c Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 15:56:42 -0800 Subject: [PATCH 04/26] Support saved authentication tokens in sync CLI --- src/garmy/localdb/cli.py | 19 +++++++++++-------- src/garmy/localdb/sync.py | 30 +++++++++++++++++++++++------- 2 files changed, 34 insertions(+), 15 deletions(-) diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 185e692..339d19b 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -76,13 +76,10 @@ def cmd_sync(args) -> int: start_date = end_date - timedelta(days=6) print(f"Syncing data from {start_date} to {end_date}") - - # Get credentials - email, password = get_credentials() - + # Setup progress reporter progress_reporter = ProgressReporter(use_tqdm=args.progress == 'tqdm') - + # Initialize sync manager config = LocalDBConfig() manager = SyncManager( @@ -90,10 +87,16 @@ def cmd_sync(args) -> int: config=config, progress_reporter=progress_reporter ) - - # Initialize with credentials 
+ + # Try to initialize with saved tokens first print("Connecting to Garmin Connect...") - manager.initialize(email, password) + try: + manager.initialize() + print("Using saved authentication tokens") + except RuntimeError: + # No valid tokens, prompt for credentials + email, password = get_credentials() + manager.initialize(email, password) # Parse metrics metrics = parse_metrics(args.metrics) if args.metrics else list(MetricType) diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index a388583..b78ff4b 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -37,17 +37,33 @@ def __init__(self, self.api_client = None self.activities_iterator = None - def initialize(self, email: str, password: str): - """Initialize with Garmin credentials.""" + def initialize(self, email: Optional[str] = None, password: Optional[str] = None): + """Initialize with Garmin credentials or saved tokens. + + Args: + email: Garmin account email (optional if tokens are saved) + password: Garmin account password (optional if tokens are saved) + """ try: from garmy import AuthClient, APIClient auth_client = AuthClient() - auth_client.login( - email, - password, - prompt_mfa=lambda: input("MFA code: "), - ) + + # Check if already authenticated with saved tokens + if not auth_client.is_authenticated: + if auth_client.needs_refresh: + self.progress.info("Refreshing authentication tokens...") + auth_client.refresh_tokens() + elif email and password: + auth_client.login( + email, + password, + prompt_mfa=lambda: input("MFA code: "), + ) + else: + raise RuntimeError( + "No valid saved tokens found. Please provide email and password." 
+ ) self.api_client = APIClient(auth_client=auth_client) From 6c5aecb5a763e9d5f27ea9c791a9b1a709fc4fae Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 15:57:03 -0800 Subject: [PATCH 05/26] Handle None values in metric parsing --- src/garmy/localdb/extractors.py | 42 ++++++++++++++------ src/garmy/metrics/body_battery.py | 4 +- src/garmy/metrics/hrv.py | 4 +- src/garmy/metrics/sleep.py | 64 ++++++++++++++++--------------- 4 files changed, 68 insertions(+), 46 deletions(-) diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 9bdff81..049e3a4 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -66,25 +66,43 @@ def _extract_daily_summary_data(self, data: Any) -> Dict[str, Any]: } def _extract_sleep_data(self, data: Any) -> Dict[str, Any]: - """Extract sleep data from Sleep object - use the properties, stupid!""" - return { + """Extract sleep data from Sleep object.""" + result = { # Use the built-in properties from Sleep class 'sleep_duration_hours': getattr(data, 'sleep_duration_hours', None), 'deep_sleep_percentage': getattr(data, 'deep_sleep_percentage', None), 'light_sleep_percentage': getattr(data, 'light_sleep_percentage', None), 'rem_sleep_percentage': getattr(data, 'rem_sleep_percentage', None), 'awake_percentage': getattr(data, 'awake_percentage', None), - - # Calculate hours from the summary if available - 'deep_sleep_hours': getattr(data.sleep_summary, 'deep_sleep_seconds', 0) / 3600 if hasattr(data, 'sleep_summary') and data.sleep_summary and getattr(data.sleep_summary, 'deep_sleep_seconds', 0) > 0 else None, - 'light_sleep_hours': getattr(data.sleep_summary, 'light_sleep_seconds', 0) / 3600 if hasattr(data, 'sleep_summary') and data.sleep_summary and getattr(data.sleep_summary, 'light_sleep_seconds', 0) > 0 else None, - 'rem_sleep_hours': getattr(data.sleep_summary, 'rem_sleep_seconds', 0) / 3600 if hasattr(data, 'sleep_summary') and data.sleep_summary and 
getattr(data.sleep_summary, 'rem_sleep_seconds', 0) > 0 else None, - 'awake_hours': getattr(data.sleep_summary, 'awake_sleep_seconds', 0) / 3600 if hasattr(data, 'sleep_summary') and data.sleep_summary and getattr(data.sleep_summary, 'awake_sleep_seconds', 0) > 0 else None, - - # Physiological data from summary - 'average_spo2': getattr(data.sleep_summary, 'average_sp_o2_value', None) if hasattr(data, 'sleep_summary') and data.sleep_summary else None, - 'average_respiration': getattr(data.sleep_summary, 'average_respiration_value', None) if hasattr(data, 'sleep_summary') and data.sleep_summary else None + 'deep_sleep_hours': None, + 'light_sleep_hours': None, + 'rem_sleep_hours': None, + 'awake_hours': None, + 'average_spo2': None, + 'average_respiration': None, } + + # Extract from sleep_summary if available + if hasattr(data, 'sleep_summary') and data.sleep_summary: + summary = data.sleep_summary + deep = getattr(summary, 'deep_sleep_seconds', None) + light = getattr(summary, 'light_sleep_seconds', None) + rem = getattr(summary, 'rem_sleep_seconds', None) + awake = getattr(summary, 'awake_sleep_seconds', None) + + if deep and deep > 0: + result['deep_sleep_hours'] = deep / 3600 + if light and light > 0: + result['light_sleep_hours'] = light / 3600 + if rem and rem > 0: + result['rem_sleep_hours'] = rem / 3600 + if awake and awake > 0: + result['awake_hours'] = awake / 3600 + + result['average_spo2'] = getattr(summary, 'average_sp_o2_value', None) + result['average_respiration'] = getattr(summary, 'average_respiration_value', None) + + return result def _extract_heart_rate_summary(self, data: Any) -> Dict[str, Any]: """Extract heart rate summary data.""" diff --git a/src/garmy/metrics/body_battery.py b/src/garmy/metrics/body_battery.py index c90c900..a22c8e7 100644 --- a/src/garmy/metrics/body_battery.py +++ b/src/garmy/metrics/body_battery.py @@ -81,7 +81,7 @@ class BodyBattery: user_profile_pk: int calendar_date: str - body_battery_values_array: List[List[Any]] 
+ body_battery_values_array: Optional[List[List[Any]]] = None # Optional fields we ignore for Body Battery analysis start_timestamp_gmt: Optional["datetime"] = None @@ -100,6 +100,8 @@ class BodyBattery: def body_battery_readings(self) -> List[BodyBatteryReading]: """Parse raw Body Battery data into structured readings.""" readings = [] + if not self.body_battery_values_array: + return readings for item in self.body_battery_values_array: if len(item) >= 4: readings.append( diff --git a/src/garmy/metrics/hrv.py b/src/garmy/metrics/hrv.py index 3c4e7d7..0a92073 100644 --- a/src/garmy/metrics/hrv.py +++ b/src/garmy/metrics/hrv.py @@ -75,8 +75,8 @@ def parse_hrv_data(data: Dict[str, Any]) -> "HRV": ) # Parse HRV summary - hrv_summary_data = snake_dict.get("hrv_summary", {}) - baseline_data = hrv_summary_data.get("baseline", {}) + hrv_summary_data = snake_dict.get("hrv_summary") or {} + baseline_data = hrv_summary_data.get("baseline") or {} baseline = HRVBaseline( low_upper=baseline_data.get("low_upper", 0), diff --git a/src/garmy/metrics/sleep.py b/src/garmy/metrics/sleep.py index 6ea4890..67bfb31 100644 --- a/src/garmy/metrics/sleep.py +++ b/src/garmy/metrics/sleep.py @@ -115,13 +115,19 @@ def sleep_end_datetime_local(self) -> "datetime": return self.timestamp_to_datetime(self.sleep_end_timestamp_local) @property - def total_sleep_duration_hours(self) -> float: + def total_sleep_duration_hours(self) -> Optional[float]: """Get total sleep duration in hours.""" + if self.sleep_time_seconds is None: + return None return self.sleep_time_seconds / 3600 @property - def sleep_efficiency_percentage(self) -> float: + def sleep_efficiency_percentage(self) -> Optional[float]: """Calculate sleep efficiency (sleep time / time in bed).""" + if (self.sleep_end_timestamp_local is None or + self.sleep_start_timestamp_local is None or + self.sleep_time_seconds is None): + return None time_in_bed = ( self.sleep_end_timestamp_local - self.sleep_start_timestamp_local ) / 1000 @@ -204,49 
+210,45 @@ def __str__(self) -> str: return "\n".join(lines) if lines else "Sleep data available" @property - def sleep_duration_hours(self) -> float: + def sleep_duration_hours(self) -> Optional[float]: """Get total sleep duration in hours.""" return self.sleep_summary.total_sleep_duration_hours @property - def deep_sleep_percentage(self) -> float: + def deep_sleep_percentage(self) -> Optional[float]: """Get deep sleep as percentage of total sleep.""" - if self.sleep_summary.sleep_time_seconds > 0: - return ( - self.sleep_summary.deep_sleep_seconds - / self.sleep_summary.sleep_time_seconds - ) * 100 - return 0 + total = self.sleep_summary.sleep_time_seconds + deep = self.sleep_summary.deep_sleep_seconds + if total and total > 0 and deep is not None: + return (deep / total) * 100 + return None @property - def light_sleep_percentage(self) -> float: + def light_sleep_percentage(self) -> Optional[float]: """Get light sleep as percentage of total sleep.""" - if self.sleep_summary.sleep_time_seconds > 0: - return ( - self.sleep_summary.light_sleep_seconds - / self.sleep_summary.sleep_time_seconds - ) * 100 - return 0 + total = self.sleep_summary.sleep_time_seconds + light = self.sleep_summary.light_sleep_seconds + if total and total > 0 and light is not None: + return (light / total) * 100 + return None @property - def rem_sleep_percentage(self) -> float: + def rem_sleep_percentage(self) -> Optional[float]: """Get REM sleep as percentage of total sleep.""" - if self.sleep_summary.sleep_time_seconds > 0: - return ( - self.sleep_summary.rem_sleep_seconds - / self.sleep_summary.sleep_time_seconds - ) * 100 - return 0 + total = self.sleep_summary.sleep_time_seconds + rem = self.sleep_summary.rem_sleep_seconds + if total and total > 0 and rem is not None: + return (rem / total) * 100 + return None @property - def awake_percentage(self) -> float: + def awake_percentage(self) -> Optional[float]: """Get awake time as percentage of total sleep period.""" - if 
self.sleep_summary.sleep_time_seconds > 0: - return ( - self.sleep_summary.awake_sleep_seconds - / self.sleep_summary.sleep_time_seconds - ) * 100 - return 0 + total = self.sleep_summary.sleep_time_seconds + awake = self.sleep_summary.awake_sleep_seconds + if total and total > 0 and awake is not None: + return (awake / total) * 100 + return None @property def spo2_readings_count(self) -> int: From 575b6ac9cc2be15d875ad15a7d431c8f07034ffc Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 18:34:30 -0800 Subject: [PATCH 06/26] Add exercise sets sync for strength training activities Sync detailed exercise data (sets, reps, weight) for strength training activities from Garmin Connect. This enables tracking workout volume and progression over time. Changes: - Add ExerciseSet model for storing exercise data (category, name, reps, weight) - Extend Activity model with activity_type, distance, calories, elevation fields - Add exerciseSets API endpoint to ActivitiesAccessor - Extract and store exercise sets during activity sync - Calculate strength summary (total_sets, total_reps, total_weight_kg) - Add CLI backfill command for existing activities - Add activity details backfill status to CLI status command - Fix activities sync date order to match iterator (newest-first) - Add database schema migration for existing databases --- docs/database-schema.md | 139 ++++++++++++++++-- docs/localdb-guide.md | 98 ++++++++++++- src/garmy/localdb/activities_iterator.py | 13 ++ src/garmy/localdb/cli.py | 107 ++++++++++++-- src/garmy/localdb/db.py | 174 +++++++++++++++++++++-- src/garmy/localdb/extractors.py | 135 +++++++++++++++++- src/garmy/localdb/models.py | 40 +++++- src/garmy/localdb/sync.py | 122 ++++++++++++++-- src/garmy/metrics/activities.py | 59 +++++++- 9 files changed, 837 insertions(+), 50 deletions(-) diff --git a/docs/database-schema.md b/docs/database-schema.md index 455d4d0..3c1a087 100644 --- a/docs/database-schema.md +++ b/docs/database-schema.md @@ 
-6,10 +6,11 @@ Complete reference for Garmy's LocalDB database schema and structure. The Garmy LocalDB uses SQLite with optimized tables for health data storage: -- **4 main tables** for different data types +- **5 main tables** for different data types - **Normalized structure** for efficient querying - **Dedicated columns** for common health metrics - **Sync tracking** for data integrity +- **Automatic migrations** for schema updates ## 📊 Schema Diagram @@ -31,10 +32,19 @@ timeseries (High-frequency data) activities (Workouts and activities) ├── user_id, activity_id (PK) -├── activity_date, activity_name -├── duration_seconds, avg_heart_rate +├── activity_date, activity_name, activity_type +├── duration_seconds, avg_heart_rate, max_heart_rate +├── distance_meters, calories, elevation_gain/loss ├── training_load, start_time -└── created_at +├── total_sets, total_reps, total_weight_kg (strength) +└── details_synced, created_at, updated_at + +exercise_sets (Strength training sets) +├── user_id, activity_id, set_order (PK) +├── exercise_category, exercise_name +├── repetition_count, weight_grams +├── set_type, duration_seconds +└── start_time, created_at sync_status (Sync tracking) ├── user_id, sync_date, metric_type (PK) @@ -137,17 +147,78 @@ meta_data JSON -- Additional metadata (optional) **Columns:** ```sql +-- Identity user_id INTEGER -- User identifier activity_id STRING -- Garmin activity ID activity_date DATE -- Date of activity -activity_name STRING -- Activity type (e.g., "Running", "Cycling") +activity_name STRING -- Activity display name (e.g., "Morning Run") +activity_type STRING -- Activity type key (running, cycling, strength_training) + +-- Performance Metrics duration_seconds INTEGER -- Activity duration in seconds avg_heart_rate INTEGER -- Average heart rate during activity +max_heart_rate INTEGER -- Maximum heart rate during activity training_load FLOAT -- Training load/stress score start_time STRING -- Activity start time + +-- Distance and 
Movement +distance_meters FLOAT -- Total distance in meters +calories INTEGER -- Calories burned +elevation_gain FLOAT -- Elevation gained in meters +elevation_loss FLOAT -- Elevation lost in meters +avg_speed FLOAT -- Average speed (m/s) +max_speed FLOAT -- Maximum speed (m/s) + +-- Strength Training Summary (populated for strength activities) +total_sets INTEGER -- Total active sets in workout +total_reps INTEGER -- Total repetitions across all sets +total_weight_kg FLOAT -- Total volume (sum of weight × reps) + +-- Sync Tracking +details_synced BOOLEAN -- Whether exercise sets have been synced created_at DATETIME -- Record creation time +updated_at DATETIME -- Last update time +``` + +**Activity Types:** +- `running`, `cycling`, `swimming` - Cardio activities +- `strength_training`, `indoor_strength_training` - Strength workouts +- `walking`, `hiking` - Walking activities +- `yoga`, `pilates` - Flexibility/wellness + +### `exercise_sets` +**Purpose:** Individual exercise sets from strength training activities + +**Primary Key:** `(user_id, activity_id, set_order)` + +**Columns:** +```sql +-- Identity +user_id INTEGER -- User identifier +activity_id STRING -- Parent activity ID (FK to activities) +set_order INTEGER -- Order within activity (0-indexed) + +-- Exercise Info +exercise_category STRING -- Exercise type (CURL, BENCH_PRESS, SQUAT, etc.) 
+exercise_name STRING -- Custom exercise name (if available) +set_type STRING -- Set type: ACTIVE or REST + +-- Set Metrics +repetition_count INTEGER -- Number of reps in set +weight_grams FLOAT -- Weight in grams (divide by 1000 for kg) +duration_seconds FLOAT -- Set duration in seconds +start_time STRING -- Set start timestamp + +-- Metadata +created_at DATETIME -- Record creation time ``` +**Common Exercise Categories:** +- Upper body: `BENCH_PRESS`, `SHOULDER_PRESS`, `CURL`, `TRICEP_EXTENSION`, `LAT_PULLDOWN`, `ROW` +- Lower body: `SQUAT`, `DEADLIFT`, `LEG_PRESS`, `LUNGE`, `LEG_CURL`, `LEG_EXTENSION` +- Core: `PLANK`, `CRUNCH`, `RUSSIAN_TWIST` +- Other: `UNKNOWN` (when Garmin cannot identify the exercise) + ### `sync_status` **Purpose:** Track synchronization status for each metric per date @@ -229,18 +300,70 @@ ORDER BY timestamp; ### Sync Status Check ```sql -SELECT +SELECT sync_date, metric_type, status, synced_at, error_message -FROM sync_status -WHERE user_id = 1 +FROM sync_status +WHERE user_id = 1 AND status = 'failed' ORDER BY sync_date DESC; ``` +### Strength Training Volume +```sql +-- Weekly workout volume +SELECT + strftime('%Y-W%W', activity_date) as week, + COUNT(*) as workouts, + SUM(total_sets) as total_sets, + SUM(total_reps) as total_reps, + ROUND(SUM(total_weight_kg), 1) as total_volume_kg +FROM activities +WHERE user_id = 1 + AND activity_type IN ('strength_training', 'indoor_strength_training') + AND activity_date >= date('now', '-90 days') +GROUP BY week +ORDER BY week; +``` + +### Exercise Progression +```sql +-- Track max weight progression for an exercise +SELECT + a.activity_date, + e.exercise_category, + MAX(e.weight_grams) / 1000.0 as max_weight_kg, + SUM(e.repetition_count) as total_reps +FROM exercise_sets e +JOIN activities a ON e.activity_id = a.activity_id AND e.user_id = a.user_id +WHERE e.user_id = 1 + AND e.exercise_category = 'BENCH_PRESS' + AND e.set_type = 'ACTIVE' +GROUP BY a.activity_date +ORDER BY a.activity_date; +``` 
+ +### Exercise Category Summary +```sql +-- Volume by exercise category +SELECT + exercise_category, + COUNT(*) as total_sets, + SUM(repetition_count) as total_reps, + ROUND(SUM(weight_grams) / 1000.0, 1) as total_weight_kg, + ROUND(AVG(weight_grams) / 1000.0, 1) as avg_weight_kg, + ROUND(AVG(repetition_count), 1) as avg_reps +FROM exercise_sets +WHERE user_id = 1 + AND set_type = 'ACTIVE' + AND weight_grams > 0 +GROUP BY exercise_category +ORDER BY total_weight_kg DESC; +``` + ## 📈 Data Relationships ### User-Centric Design diff --git a/docs/localdb-guide.md b/docs/localdb-guide.md index 3b6c1c8..9f74e22 100644 --- a/docs/localdb-guide.md +++ b/docs/localdb-guide.md @@ -33,6 +33,9 @@ garmy-sync status # Reset failed sync records garmy-sync reset --force + +# Backfill activity details for existing activities +garmy-sync backfill --limit 100 ``` ## 📊 Database Schema @@ -60,8 +63,18 @@ Individual workouts and activities with performance metrics. **Key Fields:** - `user_id`, `activity_id` (Primary Key) -- `activity_name`, `duration_seconds`, `avg_heart_rate` -- `training_load`, `activity_date` +- `activity_name`, `activity_type`, `duration_seconds` +- `avg_heart_rate`, `max_heart_rate`, `training_load` +- `distance_meters`, `calories`, `elevation_gain/loss` +- `total_sets`, `total_reps`, `total_weight_kg` (strength training) + +#### `exercise_sets` +Individual exercise sets from strength training activities. + +**Key Fields:** +- `user_id`, `activity_id`, `set_order` (Primary Key) +- `exercise_category` (CURL, BENCH_PRESS, SQUAT, etc.) +- `repetition_count`, `weight_grams`, `duration_seconds` #### `sync_status` Sync status tracking for each metric per date. 
@@ -208,13 +221,13 @@ with db.get_session() as session: ```python # Analyze workout intensity activity_query = """ - SELECT + SELECT activity_name, AVG(avg_heart_rate) as avg_hr, AVG(training_load) as avg_load, COUNT(*) as workout_count - FROM activities - WHERE user_id = 1 + FROM activities + WHERE user_id = 1 AND activity_date >= date('now', '-90 days') GROUP BY activity_name HAVING workout_count >= 3 @@ -223,14 +236,81 @@ activity_query = """ with db.get_session() as session: results = session.execute(text(activity_query)).fetchall() - + for row in results: print(f"{row.activity_name}: {row.avg_hr:.0f} BPM avg, " f"{row.avg_load:.1f} training load ({row.workout_count} workouts)") ``` +### Strength Training Analysis +```python +# Analyze workout volume over time +volume_query = """ + SELECT + activity_date, + activity_name, + total_sets, + total_reps, + total_weight_kg + FROM activities + WHERE user_id = 1 + AND activity_type = 'strength_training' + AND activity_date >= date('now', '-30 days') + ORDER BY activity_date DESC +""" + +# Analyze specific exercise categories +exercise_query = """ + SELECT + exercise_category, + COUNT(*) as total_sets, + SUM(repetition_count) as total_reps, + ROUND(SUM(weight_grams) / 1000.0, 1) as total_weight_kg, + ROUND(AVG(weight_grams) / 1000.0, 1) as avg_weight_kg + FROM exercise_sets + WHERE user_id = 1 + AND set_type = 'ACTIVE' + GROUP BY exercise_category + ORDER BY total_weight_kg DESC +""" + +# Track strength progression for a specific exercise +progression_query = """ + SELECT + a.activity_date, + e.exercise_category, + MAX(e.weight_grams) / 1000.0 as max_weight_kg, + AVG(e.repetition_count) as avg_reps + FROM exercise_sets e + JOIN activities a ON e.activity_id = a.activity_id AND e.user_id = a.user_id + WHERE e.user_id = 1 + AND e.exercise_category = 'BENCH_PRESS' + AND e.set_type = 'ACTIVE' + GROUP BY a.activity_date + ORDER BY a.activity_date +""" +``` + ## 🔄 Advanced Sync Operations +### Activity Details and Exercise 
Sets + +When syncing activities, the system automatically fetches exercise sets for strength training activities. This includes: +- Exercise category (CURL, BENCH_PRESS, SQUAT, etc.) +- Repetition count and weight +- Set duration and timing + +**Backfilling existing activities:** +```bash +# Backfill details for activities that were synced before this feature +garmy-sync backfill --limit 100 + +# Check backfill progress +garmy-sync status +``` + +The backfill command fetches exercise sets for strength training activities that don't have details yet. Use `--limit` to control how many activities to process per run. + ### Selective Metric Sync ```python from garmy.localdb.models import MetricType @@ -267,6 +347,12 @@ stats = sync_manager.sync_range(user_id=1, start_date=start_date, end_date=end_d ## 🛠️ Troubleshooting +### Database Migrations + +Database schema migrations are **automatic**. When new columns or tables are added (like `exercise_sets`), they are created automatically when you use the database. No manual migration steps required. + +For existing databases, new columns are added to the `activities` table using `ALTER TABLE` statements on first use. + ### Common Issues 1. **Database Lock Errors** diff --git a/src/garmy/localdb/activities_iterator.py b/src/garmy/localdb/activities_iterator.py index 7f88a9c..f5b3de8 100644 --- a/src/garmy/localdb/activities_iterator.py +++ b/src/garmy/localdb/activities_iterator.py @@ -31,6 +31,19 @@ def initialize(self): """Initialize the iterator by loading first batch.""" self._load_next_batch() self._advance_to_next_activity() + + def reset(self): + """Reset iterator state for a new sync session. + + This must be called before syncing activities to ensure the iterator + starts fresh and doesn't use stale cached data from previous syncs. 
+ """ + self.current_activity = None + self.current_activity_date = None + self.activities_cache = [] + self.batch_offset = 0 + self.has_more_data = True + self.initialize() def _load_next_batch(self) -> bool: """Load next batch of activities from API.""" diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 339d19b..6031413 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -168,10 +168,33 @@ def cmd_status(args) -> int: recent_records = session.query(SyncStatus).filter( SyncStatus.synced_at.isnot(None) ).order_by(SyncStatus.synced_at.desc()).limit(5).all() - + for record in recent_records: print(f"{record.synced_at} {record.sync_date} {record.metric_type}: {record.status}") - + + # Show activity details backfill status + from .models import Activity + from sqlalchemy import and_ + total_activities = session.query(Activity).filter( + Activity.user_id == args.user_id + ).count() + + backfilled = session.query(Activity).filter( + and_( + Activity.user_id == args.user_id, + Activity.details_synced == True # noqa: E712 + ) + ).count() + + pending = total_activities - backfilled + + print(f"\n=== ACTIVITY DETAILS BACKFILL ===") + print(f"Total activities: {total_activities}") + print(f"Details synced: {backfilled}") + print(f"Pending backfill: {pending}") + if total_activities > 0: + print(f"Progress: {backfilled / total_activities * 100:.1f}%") + return 0 except Exception as e: @@ -183,38 +206,86 @@ def cmd_reset(args) -> int: """Reset failed sync statuses to pending.""" try: from .db import HealthDB - + db = HealthDB(args.db_path) - + with db.get_session() as session: from .models import SyncStatus - + # Count failed records failed_count = session.query(SyncStatus).filter(SyncStatus.status == 'failed').count() - + if failed_count == 0: print("No failed records found") return 0 - + # Confirm reset if not args.force: response = input(f"Reset {failed_count} failed records to pending? 
(y/N): ") if response.lower() != 'y': print("Reset cancelled") return 0 - + # Reset failed to pending updated = session.query(SyncStatus).filter(SyncStatus.status == 'failed').update({ 'status': 'pending', 'error_message': None, 'synced_at': None }) - + session.commit() print(f"Reset {updated} failed records to pending") - + return 0 - + + except Exception as e: + print(f"Error: {e}") + return 1 + + +def cmd_backfill(args) -> int: + """Backfill activity details for existing activities.""" + try: + # Setup progress reporter + progress_reporter = ProgressReporter(use_tqdm=args.progress == 'tqdm') + + # Initialize sync manager + config = LocalDBConfig() + manager = SyncManager( + db_path=args.db_path, + config=config, + progress_reporter=progress_reporter + ) + + # Try to initialize with saved tokens first + print("Connecting to Garmin Connect...") + try: + manager.initialize() + print("Using saved authentication tokens") + except RuntimeError: + # No valid tokens, prompt for credentials + email, password = get_credentials() + manager.initialize(email, password) + + print(f"\nBackfilling activity details (limit: {args.limit} activities)") + + # Execute backfill + stats = manager.backfill_activity_details( + user_id=args.user_id, + limit=args.limit + ) + + # Print results + print(f"\nBackfill completed!") + print(f" Total activities: {stats['total']}") + print(f" Completed: {stats['completed']}") + print(f" Failed: {stats['failed']}") + + return 0 if stats['failed'] == 0 else 1 + + except KeyboardInterrupt: + print("\nBackfill interrupted by user") + return 130 except Exception as e: print(f"Error: {e}") return 1 @@ -232,6 +303,7 @@ def create_parser() -> argparse.ArgumentParser: %(prog)s sync --metrics DAILY_SUMMARY,SLEEP # Sync specific metrics %(prog)s status # Show sync status %(prog)s reset --force # Reset failed records + %(prog)s backfill --limit 50 # Backfill activity details """ ) @@ -269,7 +341,16 @@ def create_parser() -> argparse.ArgumentParser: 
reset_parser = subparsers.add_parser('reset', help='Reset failed sync records to pending') reset_parser.add_argument('--force', action='store_true', help='Reset without confirmation prompt') - + + # Backfill command + backfill_parser = subparsers.add_parser('backfill', + help='Backfill activity details for existing activities') + backfill_parser.add_argument('--limit', type=int, default=100, + help='Maximum number of activities to process (default: 100)') + backfill_parser.add_argument('--progress', choices=['tqdm', 'simple', 'silent'], + default='tqdm', + help='Progress display mode (default: tqdm)') + return parser @@ -289,6 +370,8 @@ def main() -> int: return cmd_status(args) elif args.command == 'reset': return cmd_reset(args) + elif args.command == 'backfill': + return cmd_backfill(args) else: print(f"Unknown command: {args.command}") return 1 diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index 1e2c324..2937107 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -4,10 +4,10 @@ from pathlib import Path from typing import List, Dict, Any, Optional, TYPE_CHECKING -from sqlalchemy import create_engine, and_ +from sqlalchemy import create_engine, and_, text, inspect from sqlalchemy.orm import sessionmaker, Session -from .models import Base, TimeSeries, Activity, DailyHealthMetric, SyncStatus, MetricType +from .models import Base, TimeSeries, Activity, DailyHealthMetric, SyncStatus, MetricType, ExerciseSet if TYPE_CHECKING: from .config import DatabaseConfig @@ -40,9 +40,49 @@ def __init__(self, self.engine = create_engine(f"sqlite:///{db_path}") self.SessionLocal = sessionmaker(bind=self.engine) - + Base.metadata.create_all(self.engine) - + + # Run migrations to add new columns to existing databases + self._migrate_schema() + + def _migrate_schema(self): + """Migrate database schema to add new columns for existing databases.""" + inspector = inspect(self.engine) + + # Check if activities table exists and needs migration + if 
'activities' in inspector.get_table_names(): + existing_columns = {col['name'] for col in inspector.get_columns('activities')} + + # New columns to add to activities table + new_activity_columns = [ + ('activity_type', 'VARCHAR'), + ('distance_meters', 'FLOAT'), + ('calories', 'INTEGER'), + ('elevation_gain', 'FLOAT'), + ('elevation_loss', 'FLOAT'), + ('avg_speed', 'FLOAT'), + ('max_speed', 'FLOAT'), + ('max_heart_rate', 'INTEGER'), + ('total_sets', 'INTEGER'), + ('total_reps', 'INTEGER'), + ('total_weight_kg', 'FLOAT'), + ('details_synced', 'BOOLEAN DEFAULT 0'), + ('updated_at', 'DATETIME'), + ] + + with self.engine.connect() as conn: + for col_name, col_type in new_activity_columns: + if col_name not in existing_columns: + try: + conn.execute(text(f'ALTER TABLE activities ADD COLUMN {col_name} {col_type}')) + conn.commit() + except Exception: + # Column might already exist or other issue, continue + pass + + # Create exercise_sets table if it doesn't exist (handled by create_all above) + def get_session(self) -> Session: """Get database session.""" return self.SessionLocal() @@ -57,7 +97,7 @@ def get_schema_info(self) -> Dict[str, Any]: def validate_schema(self) -> bool: """Validate database schema.""" try: - expected_tables = {'timeseries', 'activities', 'daily_health_metrics', 'sync_status'} + expected_tables = {'timeseries', 'activities', 'daily_health_metrics', 'sync_status', 'exercise_sets'} actual_tables = set(Base.metadata.tables.keys()) return expected_tables.issubset(actual_tables) except Exception: @@ -81,7 +121,7 @@ def store_timeseries_batch(self, user_id: int, metric_type: MetricType, data: Li session.commit() def store_activity(self, user_id: int, activity_data: Dict[str, Any]): - """Store activity data.""" + """Store activity data including all available fields from API.""" with self.get_session() as session: activity = Activity( user_id=user_id, @@ -90,8 +130,17 @@ def store_activity(self, user_id: int, activity_data: Dict[str, Any]): 
activity_name=activity_data.get('activity_name'), duration_seconds=activity_data.get('duration_seconds'), avg_heart_rate=activity_data.get('avg_heart_rate'), + max_heart_rate=activity_data.get('max_heart_rate'), training_load=activity_data.get('training_load'), - start_time=activity_data.get('start_time') + start_time=activity_data.get('start_time'), + # Extended fields from activity list + activity_type=activity_data.get('activity_type'), + distance_meters=activity_data.get('distance_meters'), + calories=activity_data.get('calories'), + elevation_gain=activity_data.get('elevation_gain'), + elevation_loss=activity_data.get('elevation_loss'), + avg_speed=activity_data.get('avg_speed'), + max_speed=activity_data.get('max_speed'), ) session.merge(activity) session.commit() @@ -310,5 +359,114 @@ def _activity_to_dict(self, activity: Activity) -> Dict[str, Any]: 'avg_heart_rate': activity.avg_heart_rate, 'training_load': activity.training_load, 'start_time': activity.start_time, - 'created_at': activity.created_at + 'created_at': activity.created_at, + # Extended activity details + 'activity_type': activity.activity_type, + 'distance_meters': activity.distance_meters, + 'calories': activity.calories, + 'elevation_gain': activity.elevation_gain, + 'elevation_loss': activity.elevation_loss, + 'avg_speed': activity.avg_speed, + 'max_speed': activity.max_speed, + 'max_heart_rate': activity.max_heart_rate, + # Strength training summary + 'total_sets': activity.total_sets, + 'total_reps': activity.total_reps, + 'total_weight_kg': activity.total_weight_kg, + 'details_synced': activity.details_synced, + 'updated_at': activity.updated_at + } + + def store_exercise_sets(self, user_id: int, activity_id: str, sets: List[Dict[str, Any]]): + """Store exercise sets for an activity.""" + with self.get_session() as session: + for set_data in sets: + exercise_set = ExerciseSet( + user_id=user_id, + activity_id=activity_id, + set_order=set_data.get('set_order', 0), + 
exercise_category=set_data.get('exercise_category'), + exercise_name=set_data.get('exercise_name'), + set_type=set_data.get('set_type'), + repetition_count=set_data.get('repetition_count'), + weight_grams=set_data.get('weight_grams'), + duration_seconds=set_data.get('duration_seconds'), + start_time=set_data.get('start_time') + ) + session.merge(exercise_set) + session.commit() + + def get_exercise_sets(self, user_id: int, activity_id: str) -> List[Dict[str, Any]]: + """Get exercise sets for an activity.""" + with self.get_session() as session: + sets = session.query(ExerciseSet).filter( + and_( + ExerciseSet.user_id == user_id, + ExerciseSet.activity_id == activity_id + ) + ).order_by(ExerciseSet.set_order).all() + return [self._exercise_set_to_dict(s) for s in sets] + + def get_all_exercise_sets(self, user_id: int, start_date: date, end_date: date) -> List[Dict[str, Any]]: + """Get all exercise sets for activities in date range.""" + with self.get_session() as session: + # Join with activities to filter by date + sets = session.query(ExerciseSet).join( + Activity, + and_( + ExerciseSet.user_id == Activity.user_id, + ExerciseSet.activity_id == Activity.activity_id + ) + ).filter( + and_( + ExerciseSet.user_id == user_id, + Activity.activity_date >= start_date, + Activity.activity_date <= end_date + ) + ).order_by(Activity.activity_date, ExerciseSet.set_order).all() + return [self._exercise_set_to_dict(s) for s in sets] + + def update_activity_details(self, user_id: int, activity_id: str, details: Dict[str, Any]): + """Update activity with detailed data.""" + with self.get_session() as session: + activity = session.query(Activity).filter( + and_( + Activity.user_id == user_id, + Activity.activity_id == activity_id + ) + ).first() + + if activity: + for field, value in details.items(): + if hasattr(activity, field): + setattr(activity, field, value) + activity.details_synced = True + session.commit() + + def get_activities_without_details(self, user_id: int, limit: 
int = 100) -> List[Dict[str, Any]]: + """Get activities that haven't had details synced yet.""" + with self.get_session() as session: + activities = session.query(Activity).filter( + and_( + Activity.user_id == user_id, + Activity.details_synced == False # noqa: E712 + ) + ).order_by(Activity.activity_date.desc()).limit(limit).all() + return [self._activity_to_dict(a) for a in activities] + + def _exercise_set_to_dict(self, exercise_set: ExerciseSet) -> Dict[str, Any]: + """Convert ExerciseSet to dictionary.""" + return { + 'user_id': exercise_set.user_id, + 'activity_id': exercise_set.activity_id, + 'set_order': exercise_set.set_order, + 'exercise_category': exercise_set.exercise_category, + 'exercise_name': exercise_set.exercise_name, + 'set_type': exercise_set.set_type, + 'repetition_count': exercise_set.repetition_count, + 'weight_grams': exercise_set.weight_grams, + 'weight_kg': exercise_set.weight_grams / 1000 if exercise_set.weight_grams else None, + 'duration_seconds': exercise_set.duration_seconds, + 'start_time': exercise_set.start_time, + 'created_at': exercise_set.created_at } \ No newline at end of file diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 049e3a4..2976c60 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -178,7 +178,11 @@ def _extract_respiration_summary(self, data: Any) -> Dict[str, Any]: return {} def _extract_activity_data(self, data: Any) -> Dict[str, Any]: - """Extract activity data from both parsed and raw formats.""" + """Extract activity data from both parsed and raw formats. + + Extracts comprehensive activity data from the activity list API response, + which includes all the fields we need without requiring separate API calls. 
+ """ # Handle both object attributes and dict keys def get_value(obj, *keys): for key in keys: @@ -187,16 +191,45 @@ def get_value(obj, *keys): elif isinstance(obj, dict) and key in obj: return obj[key] return None - + + def get_nested_value(obj, outer_key, inner_key): + """Get value from nested dict/object.""" + outer = get_value(obj, outer_key) + if outer: + if isinstance(outer, dict): + return outer.get(inner_key) + elif hasattr(outer, inner_key): + return getattr(outer, inner_key, None) + return None + activity_id = get_value(data, 'activity_id', 'activityId') if activity_id: + # Extract activity type from nested activityType dict + # Parsed ActivitySummary uses 'type_key', raw dict uses 'typeKey' + activity_type = get_nested_value(data, 'activity_type', 'type_key') + if not activity_type: + activity_type = get_nested_value(data, 'activity_type', 'typeKey') + if not activity_type: + activity_type = get_nested_value(data, 'activityType', 'typeKey') + return { 'activity_id': activity_id, - 'activity_name': get_value(data, 'activity_name', 'activityName', 'activityTypeName'), + 'activity_name': get_value(data, 'activity_name', 'activityName'), 'duration_seconds': get_value(data, 'duration', 'movingDuration', 'elapsedDuration'), + # Heart rate - parsed uses average_hr/max_hr, raw uses averageHR/maxHR 'avg_heart_rate': get_value(data, 'average_hr', 'averageHR', 'avgHR'), - 'training_load': get_value(data, 'activity_training_load', 'trainingLoad'), - 'start_time': get_value(data, 'start_time_local', 'startTimeLocal', 'start_time') + 'max_heart_rate': get_value(data, 'max_hr', 'maxHR'), + 'training_load': get_value(data, 'activity_training_load', 'activityTrainingLoad', 'trainingLoad'), + 'start_time': get_value(data, 'start_time_local', 'startTimeLocal', 'start_time'), + # Activity type extracted above + 'activity_type': activity_type, + # These may not be in parsed ActivitySummary, but try anyway + 'distance_meters': get_value(data, 'distance', 'distance_meters'), 
+ 'calories': get_value(data, 'calories'), + 'elevation_gain': get_value(data, 'elevation_gain', 'elevationGain'), + 'elevation_loss': get_value(data, 'elevation_loss', 'elevationLoss'), + 'avg_speed': get_value(data, 'average_speed', 'averageSpeed'), + 'max_speed': get_value(data, 'max_speed', 'maxSpeed'), } return {} @@ -254,4 +287,96 @@ def _extract_calories_data(self, data: Any) -> Dict[str, Any]: 'total_calories': getattr(data, 'total_kilocalories', None), 'active_calories': getattr(data, 'active_kilocalories', None), 'bmr_calories': getattr(data, 'bmr_kilocalories', None) + } + + def extract_activity_details(self, data: Dict) -> Dict[str, Any]: + """Extract detailed activity data from activity details API response. + + Args: + data: Raw API response from /activity-service/activity/{id} + + Returns: + Dict with normalized activity detail fields. + """ + if not data: + return {} + + activity_type_info = data.get('activityType', {}) + + return { + 'activity_type': activity_type_info.get('typeKey') if activity_type_info else None, + 'distance_meters': data.get('distance'), + 'calories': data.get('calories'), + 'elevation_gain': data.get('elevationGain'), + 'elevation_loss': data.get('elevationLoss'), + 'avg_speed': data.get('avgSpeed'), + 'max_speed': data.get('maxSpeed'), + 'max_heart_rate': data.get('maxHR'), + } + + def extract_exercise_sets(self, data: Dict, activity_id: str) -> List[Dict[str, Any]]: + """Extract exercise sets from exerciseSets API response. + + Args: + data: Raw API response from /activity-service/activity/{id}/exerciseSets + activity_id: The activity ID these sets belong to + + Returns: + List of dicts with normalized exercise set fields. 
+ """ + if not data: + return [] + + sets = [] + exercise_sets = data.get('exerciseSets', []) + + for i, set_data in enumerate(exercise_sets): + exercises = set_data.get('exercises', []) + + # Get most probable exercise category from the exercises list + category = None + exercise_name = None + if exercises: + # Sort by probability and get the best match + best_match = max(exercises, key=lambda x: x.get('probability', 0)) + category = best_match.get('category') + exercise_name = best_match.get('name') + + sets.append({ + 'set_order': i, + 'exercise_category': category, + 'exercise_name': exercise_name, + 'set_type': set_data.get('setType'), + 'repetition_count': set_data.get('repetitionCount'), + 'weight_grams': set_data.get('weight'), # API returns weight in milligrams + 'duration_seconds': set_data.get('duration'), + 'start_time': set_data.get('startTime') + }) + + return sets + + def calculate_strength_summary(self, sets: List[Dict[str, Any]]) -> Dict[str, Any]: + """Calculate strength training summary from exercise sets. 
+ + Args: + sets: List of exercise set dicts from extract_exercise_sets + + Returns: + Dict with total_sets, total_reps, total_weight_kg + """ + active_sets = [s for s in sets if s.get('set_type') == 'ACTIVE'] + + total_reps = sum(s.get('repetition_count', 0) or 0 for s in active_sets) + + # Calculate total volume (sum of weight * reps for each set) + total_volume_grams = 0 + for s in active_sets: + weight = s.get('weight_grams', 0) or 0 + reps = s.get('repetition_count', 0) or 0 + total_volume_grams += weight * reps + + return { + 'total_sets': len(active_sets), + 'total_reps': total_reps, + 'total_weight_kg': total_volume_grams / 1000 if total_volume_grams else 0 } \ No newline at end of file diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index cb510bc..d594cc5 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -3,7 +3,7 @@ from datetime import date, datetime from enum import Enum -from sqlalchemy import Column, Integer, String, Float, Date, DateTime, JSON, Text +from sqlalchemy import Column, Integer, String, Float, Date, DateTime, JSON, Text, Boolean from sqlalchemy.orm import declarative_base @@ -50,6 +50,44 @@ class Activity(Base): start_time = Column(String) created_at = Column(DateTime, default=datetime.utcnow) + # Activity type and detailed metrics + activity_type = Column(String) # running, cycling, strength_training, etc. 
+ distance_meters = Column(Float) + calories = Column(Integer) + elevation_gain = Column(Float) + elevation_loss = Column(Float) + avg_speed = Column(Float) # meters per second + max_speed = Column(Float) + max_heart_rate = Column(Integer) + + # Strength training summary + total_sets = Column(Integer) + total_reps = Column(Integer) + total_weight_kg = Column(Float) # Calculated total volume + + # Sync tracking + details_synced = Column(Boolean, default=False) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + +class ExerciseSet(Base): + """Exercise sets from strength training activities.""" + __tablename__ = "exercise_sets" + + user_id = Column(Integer, primary_key=True, nullable=False) + activity_id = Column(String, primary_key=True, nullable=False) + set_order = Column(Integer, primary_key=True, nullable=False) # Order within activity + + exercise_category = Column(String) # CURL, BENCH_PRESS, SQUAT, etc. + exercise_name = Column(String) + set_type = Column(String) # ACTIVE, REST + repetition_count = Column(Integer) + weight_grams = Column(Float) # Store in grams for precision + duration_seconds = Column(Float) + start_time = Column(String) + + created_at = Column(DateTime, default=datetime.utcnow) + class DailyHealthMetric(Base): """Normalized daily health metrics with dedicated columns for efficient querying.""" diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index b78ff4b..7b1173b 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -104,7 +104,9 @@ def sync_range(self, user_id: int, start_date: date, end_date: date, if metrics is None: metrics = list(MetricType) + # Separate activities from other metrics - they need different iteration order non_activities_metrics = [m for m in metrics if m != MetricType.ACTIVITIES] + has_activities = MetricType.ACTIVITIES in metrics total_tasks = date_count * len(metrics) self.progress.start_sync(total_tasks) @@ -112,13 +114,26 @@ def 
sync_range(self, user_id: int, start_date: date, end_date: date, stats = {'completed': 0, 'skipped': 0, 'failed': 0, 'total_tasks': total_tasks} try: + # Create sync status entries for all dates for current_date in self._date_range(start_date, end_date): for metric_type in metrics: if not self.db.sync_status_exists(user_id, current_date, metric_type): self.db.create_sync_status(user_id, current_date, metric_type, 'pending') - - for current_date in self._date_range(start_date, end_date): - self._sync_date(user_id, current_date, metrics, stats) + + # Sync non-activities metrics (oldest to newest is fine) + if non_activities_metrics: + for current_date in self._date_range(start_date, end_date): + self._sync_date(user_id, current_date, non_activities_metrics, stats) + + # Sync activities separately in REVERSE order (newest to oldest) + # This matches the ActivitiesIterator which returns activities newest-first + if has_activities: + # Reset iterator to ensure fresh state for this sync + if self.activities_iterator: + self.activities_iterator.reset() + # Use end_date to start_date order for activities + for current_date in self._date_range(end_date, start_date): + self._sync_activities_for_date(user_id, current_date, stats) except Exception as e: raise @@ -128,14 +143,14 @@ def sync_range(self, user_id: int, start_date: date, end_date: date, return stats def _sync_date(self, user_id: int, sync_date: date, metrics: List[MetricType], stats: Dict[str, int]): - """Sync all metrics for a single date.""" + """Sync all non-activities metrics for a single date. + + Note: Activities are handled separately in sync_range() because they + require reverse date iteration to match the ActivitiesIterator. 
+ """ for metric_type in metrics: try: - if metric_type == MetricType.ACTIVITIES: - self._sync_activities_for_date(user_id, sync_date, stats) - else: - self._sync_metric_for_date(user_id, sync_date, metric_type, stats) - + self._sync_metric_for_date(user_id, sync_date, metric_type, stats) except Exception as e: self.db.update_sync_status(user_id, sync_date, metric_type, 'failed', str(e)) self.progress.task_failed(f"{metric_type.value}", sync_date) @@ -208,12 +223,101 @@ def _sync_activities_for_date(self, user_id: int, sync_date: date, stats: Dict[s self.db.store_activity(user_id, activity_data) stats['completed'] += 1 + # Fetch and store activity details (exercise sets for strength training) + activity_type = activity_data.get('activity_type') + self._sync_activity_details(user_id, str(activity_id), activity_type) + self.progress.task_complete("activities", sync_date) except Exception as e: self.progress.task_failed("activities", sync_date) stats['failed'] += 1 + def _sync_activity_details(self, user_id: int, activity_id: str, activity_type: str = None): + """Sync detailed data for a single activity. + + For strength training activities, fetches exercise sets (reps, weight, etc.). + Basic activity details (distance, calories, etc.) are already extracted from + the activity list API response during the initial sync. 
+ + Args: + user_id: User identifier + activity_id: Activity ID to fetch details for + activity_type: Activity type key (e.g., 'strength_training') + """ + try: + # Only fetch exercise sets for strength training activities + strength_types = ['strength_training', 'indoor_strength_training'] + + if activity_type and activity_type in strength_types: + activities_accessor = self.api_client.metrics.get('activities') + self._sync_exercise_sets(user_id, activity_id, activities_accessor) + + # Apply rate limiting delay after API call + import time + time.sleep(self.config.sync.rate_limit_delay) + + # Mark activity as having details synced + self.db.update_activity_details(user_id, activity_id, {'details_synced': True}) + + except Exception as e: + self.progress.warning(f"Failed to sync details for activity {activity_id}: {e}") + + def _sync_exercise_sets(self, user_id: int, activity_id: str, activities_accessor): + """Sync exercise sets for a strength training activity. + + Args: + user_id: User identifier + activity_id: Activity ID to fetch sets for + activities_accessor: The activities API accessor + """ + try: + sets_data = activities_accessor.get_exercise_sets(activity_id) + if sets_data: + sets = self.extractor.extract_exercise_sets(sets_data, activity_id) + if sets: + self.db.store_exercise_sets(user_id, activity_id, sets) + + # Calculate and store summary + summary = self.extractor.calculate_strength_summary(sets) + self.db.update_activity_details(user_id, activity_id, summary) + + except Exception as e: + self.progress.warning(f"Failed to sync exercise sets for activity {activity_id}: {e}") + + def backfill_activity_details(self, user_id: int, limit: int = 100) -> Dict[str, int]: + """Backfill detailed data for activities that don't have details synced. 
+ + Args: + user_id: User identifier + limit: Maximum number of activities to process + + Returns: + Dict with sync statistics + """ + if not self.api_client: + raise RuntimeError("Must call initialize() before backfilling") + + stats = {'completed': 0, 'failed': 0, 'total': 0} + + activities = self.db.get_activities_without_details(user_id, limit) + stats['total'] = len(activities) + + self.progress.info(f"Backfilling details for {len(activities)} activities") + + for activity in activities: + activity_id = activity['activity_id'] + activity_type = activity.get('activity_type') + try: + self._sync_activity_details(user_id, str(activity_id), activity_type) + stats['completed'] += 1 + except Exception as e: + self.progress.warning(f"Failed to backfill activity {activity_id}: {e}") + stats['failed'] += 1 + + self.progress.info(f"Backfill complete: {stats['completed']} succeeded, {stats['failed']} failed") + return stats + def _store_health_metric(self, user_id: int, sync_date: date, metric_type: MetricType, data: Dict): """Store health metric data in normalized table.""" if metric_type == MetricType.DAILY_SUMMARY: diff --git a/src/garmy/metrics/activities.py b/src/garmy/metrics/activities.py index a2e92fb..94bf72f 100644 --- a/src/garmy/metrics/activities.py +++ b/src/garmy/metrics/activities.py @@ -12,7 +12,7 @@ from dataclasses import dataclass, field from datetime import datetime from functools import lru_cache -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from ..core.base import MetricConfig from ..core.utils import TimestampMixin, create_list_parser @@ -309,6 +309,63 @@ def get_by_type(self, activity_type: str, limit: int = 50) -> List[ActivitySumma if activity.activity_type_name.lower() == activity_type.lower() ] + def get_activity_details(self, activity_id: Union[int, str]) -> Dict[str, Any]: + """Get detailed activity data by ID. + + Args: + activity_id: The activity ID to fetch details for. 
+ + Returns: + Dict containing full activity details including distance, calories, + elevation, speed, heart rate zones, and more. + """ + endpoint = f"/activity-service/activity/{activity_id}" + try: + return self.api_client.connectapi(endpoint) + except (SystemExit, KeyboardInterrupt, GeneratorExit): + raise + except Exception as e: + from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching activity details", endpoint, {}) + + def get_exercise_sets(self, activity_id: Union[int, str]) -> Dict[str, Any]: + """Get exercise sets for a strength training activity. + + Args: + activity_id: The activity ID to fetch exercise sets for. + + Returns: + Dict containing exerciseSets array with reps, weight, duration, + exercise category, and set type (ACTIVE/REST) for each set. + """ + endpoint = f"/activity-service/activity/{activity_id}/exerciseSets" + try: + return self.api_client.connectapi(endpoint) + except (SystemExit, KeyboardInterrupt, GeneratorExit): + raise + except Exception as e: + from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching exercise sets", endpoint, {}) + + def get_activity_splits(self, activity_id: Union[int, str]) -> Dict[str, Any]: + """Get split/lap data for an activity. + + Args: + activity_id: The activity ID to fetch splits for. + + Returns: + Dict containing split data for running, cycling, and other + activities with lap/split information. 
+ """ + endpoint = f"/activity-service/activity/{activity_id}/splits" + try: + return self.api_client.connectapi(endpoint) + except (SystemExit, KeyboardInterrupt, GeneratorExit): + raise + except Exception as e: + from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching activity splits", endpoint, {}) + # For compatibility with MetricAccessor interface def get(self, *_args: Any, **_kwargs: Any) -> Any: """Not applicable for Activities - use list() instead.""" From 0e1059b7736cd320f7e79d9d2be3085903d3e8fb Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 1 Dec 2025 20:58:26 -0800 Subject: [PATCH 07/26] Add splits/laps sync for cardio activities - Add ActivitySplit model for storing lap data (distance, speed, HR, elevation, cadence, GPS) - Add splits extraction and storage methods in extractors.py and db.py - Update sync to fetch splits for cardio activities (running, cycling, walking, swimming, rowing) - Add backfill-splits CLI command for existing cardio activities - Update documentation with activity_splits table and analysis queries --- docs/database-schema.md | 132 +++++++++++++++++++++++++++++++- docs/localdb-guide.md | 90 ++++++++++++++++++++++ src/garmy/localdb/cli.py | 61 +++++++++++++++ src/garmy/localdb/db.py | 114 ++++++++++++++++++++++++++- src/garmy/localdb/extractors.py | 78 +++++++++++++++++++ src/garmy/localdb/models.py | 48 ++++++++++++ src/garmy/localdb/sync.py | 125 ++++++++++++++++++++++++++++-- 7 files changed, 639 insertions(+), 9 deletions(-) diff --git a/docs/database-schema.md b/docs/database-schema.md index 3c1a087..05efeb3 100644 --- a/docs/database-schema.md +++ b/docs/database-schema.md @@ -6,7 +6,7 @@ Complete reference for Garmy's LocalDB database schema and structure. 
The Garmy LocalDB uses SQLite with optimized tables for health data storage: -- **5 main tables** for different data types +- **6 main tables** for different data types - **Normalized structure** for efficient querying - **Dedicated columns** for common health metrics - **Sync tracking** for data integrity @@ -46,6 +46,16 @@ exercise_sets (Strength training sets) ├── set_type, duration_seconds └── start_time, created_at +activity_splits (Cardio lap/split data) +├── user_id, activity_id, lap_index (PK) +├── start_time, duration_seconds, moving_duration_seconds +├── distance_meters, avg_speed, max_speed +├── avg_heart_rate, max_heart_rate +├── elevation_gain, elevation_loss +├── avg_cadence, max_cadence, calories +├── start_latitude, start_longitude, end_latitude, end_longitude +└── intensity_type, created_at + sync_status (Sync tracking) ├── user_id, sync_date, metric_type (PK) ├── status, synced_at @@ -219,6 +229,66 @@ created_at DATETIME -- Record creation time - Core: `PLANK`, `CRUNCH`, `RUSSIAN_TWIST` - Other: `UNKNOWN` (when Garmin cannot identify the exercise) +### `activity_splits` +**Purpose:** Lap/split data from cardio activities (running, cycling, walking, swimming, etc.) 
+ +**Primary Key:** `(user_id, activity_id, lap_index)` + +**Columns:** +```sql +-- Identity +user_id INTEGER -- User identifier +activity_id STRING -- Parent activity ID (FK to activities) +lap_index INTEGER -- 1-indexed lap number + +-- Timing +start_time STRING -- Lap start timestamp (ISO format) +duration_seconds FLOAT -- Total lap duration +moving_duration_seconds FLOAT -- Moving time (excludes pauses) + +-- Distance and Speed +distance_meters FLOAT -- Distance covered in lap +avg_speed FLOAT -- Average speed (m/s) +max_speed FLOAT -- Maximum speed (m/s) +avg_moving_speed FLOAT -- Average speed while moving (m/s) + +-- Heart Rate +avg_heart_rate INTEGER -- Average HR during lap +max_heart_rate INTEGER -- Maximum HR during lap + +-- Elevation +elevation_gain FLOAT -- Meters gained in lap +elevation_loss FLOAT -- Meters lost in lap +max_elevation FLOAT -- Highest point (meters) +min_elevation FLOAT -- Lowest point (meters) + +-- Cadence (running/walking) +avg_cadence FLOAT -- Average steps per minute +max_cadence FLOAT -- Maximum steps per minute + +-- Energy +calories FLOAT -- Calories burned in lap + +-- GPS Coordinates +start_latitude FLOAT -- Lap start latitude +start_longitude FLOAT -- Lap start longitude +end_latitude FLOAT -- Lap end latitude +end_longitude FLOAT -- Lap end longitude + +-- Type +intensity_type STRING -- Lap type: ACTIVE or REST + +-- Metadata +created_at DATETIME -- Record creation time +``` + +**Supported Activity Types:** +- Running: `running`, `treadmill_running`, `trail_running`, `track_running` +- Cycling: `cycling`, `indoor_cycling`, `virtual_ride`, `gravel_cycling`, `road_cycling` +- Walking/Hiking: `walking`, `hiking` +- Swimming: `swimming`, `lap_swimming`, `open_water_swimming` +- Other: `elliptical`, `stair_climbing`, `rowing`, `indoor_rowing` + ### `sync_status` **Purpose:** Track synchronization status for each metric per date @@ -364,6 +434,66 @@ GROUP BY exercise_category ORDER BY total_weight_kg DESC; ``` +### Lap/Split 
Analysis +```sql +-- Detailed splits for a specific activity +SELECT + lap_index, + distance_meters, + duration_seconds, + ROUND(duration_seconds / (distance_meters / 1000.0), 1) as pace_sec_per_km, + avg_heart_rate, + max_heart_rate, + elevation_gain, + avg_cadence +FROM activity_splits +WHERE user_id = 1 + AND activity_id = '123456789' +ORDER BY lap_index; +``` + +### Running Pace Consistency +```sql +-- Analyze pace consistency across runs +SELECT + a.activity_date, + a.activity_name, + COUNT(s.lap_index) as total_laps, + ROUND(AVG(s.duration_seconds / (s.distance_meters / 1000.0)), 1) as avg_pace_sec_km, + ROUND(MAX(s.duration_seconds / (s.distance_meters / 1000.0)) - + MIN(s.duration_seconds / (s.distance_meters / 1000.0)), 1) as pace_variance +FROM activities a +JOIN activity_splits s ON a.activity_id = s.activity_id AND a.user_id = s.user_id +WHERE a.user_id = 1 + AND a.activity_type = 'running' + AND s.distance_meters > 0 + AND a.activity_date >= date('now', '-30 days') +GROUP BY a.activity_id +ORDER BY a.activity_date DESC; +``` + +### Heart Rate Zones Per Lap +```sql +-- Analyze HR distribution across laps +SELECT + lap_index, + avg_heart_rate, + CASE + WHEN avg_heart_rate < 120 THEN 'Zone 1 (Easy)' + WHEN avg_heart_rate < 140 THEN 'Zone 2 (Aerobic)' + WHEN avg_heart_rate < 160 THEN 'Zone 3 (Tempo)' + WHEN avg_heart_rate < 175 THEN 'Zone 4 (Threshold)' + ELSE 'Zone 5 (Max)' + END as hr_zone, + distance_meters, + duration_seconds +FROM activity_splits +WHERE user_id = 1 + AND activity_id = '123456789' + AND avg_heart_rate IS NOT NULL +ORDER BY lap_index; +``` + ## 📈 Data Relationships ### User-Centric Design diff --git a/docs/localdb-guide.md b/docs/localdb-guide.md index 9f74e22..db79366 100644 --- a/docs/localdb-guide.md +++ b/docs/localdb-guide.md @@ -36,6 +36,9 @@ garmy-sync reset --force # Backfill activity details for existing activities garmy-sync backfill --limit 100 + +# Backfill splits/laps for cardio activities +garmy-sync backfill-splits 
--limit 100 ``` ## 📊 Database Schema @@ -76,6 +79,16 @@ Individual exercise sets from strength training activities. - `exercise_category` (CURL, BENCH_PRESS, SQUAT, etc.) - `repetition_count`, `weight_grams`, `duration_seconds` +#### `activity_splits` +Lap/split data from cardio activities (running, cycling, walking, etc.). + +**Key Fields:** +- `user_id`, `activity_id`, `lap_index` (Primary Key) +- `distance_meters`, `duration_seconds`, `moving_duration_seconds` +- `avg_speed`, `max_speed`, `avg_heart_rate`, `max_heart_rate` +- `elevation_gain`, `elevation_loss`, `avg_cadence` +- `start_latitude`, `start_longitude`, `end_latitude`, `end_longitude` + #### `sync_status` Sync status tracking for each metric per date. @@ -291,6 +304,57 @@ progression_query = """ """ ``` +### Cardio Splits Analysis +```python +# Analyze pace per lap for a running activity +pace_query = """ + SELECT + lap_index, + distance_meters, + duration_seconds, + ROUND(duration_seconds / (distance_meters / 1000.0), 1) as pace_sec_per_km, + avg_heart_rate, + elevation_gain + FROM activity_splits + WHERE user_id = 1 + AND activity_id = '123456789' + ORDER BY lap_index +""" + +# Compare lap consistency across runs +consistency_query = """ + SELECT + a.activity_date, + a.activity_name, + COUNT(s.lap_index) as total_laps, + AVG(s.avg_speed) as avg_lap_speed, + MAX(s.avg_speed) - MIN(s.avg_speed) as speed_variance + FROM activities a + JOIN activity_splits s ON a.activity_id = s.activity_id AND a.user_id = s.user_id + WHERE a.user_id = 1 + AND a.activity_type = 'running' + AND a.activity_date >= date('now', '-30 days') + GROUP BY a.activity_id + ORDER BY a.activity_date DESC +""" + +# Negative splits analysis (running faster in later laps) +negative_splits_query = """ + SELECT + a.activity_date, + a.activity_name, + s1.avg_speed as first_half_speed, + s2.avg_speed as second_half_speed, + CASE WHEN s2.avg_speed > s1.avg_speed THEN 'Negative Split' ELSE 'Positive Split' END as split_type + FROM activities 
a + JOIN activity_splits s1 ON a.activity_id = s1.activity_id AND a.user_id = s1.user_id AND s1.lap_index = 1 + JOIN activity_splits s2 ON a.activity_id = s2.activity_id AND a.user_id = s2.user_id AND s2.lap_index = 2 + WHERE a.user_id = 1 + AND a.activity_type = 'running' + ORDER BY a.activity_date DESC +""" +``` + ## 🔄 Advanced Sync Operations ### Activity Details and Exercise Sets @@ -311,6 +375,32 @@ garmy-sync status The backfill command fetches exercise sets for strength training activities that don't have details yet. Use `--limit` to control how many activities to process per run. +### Splits/Laps for Cardio Activities + +For cardio activities (running, cycling, walking, swimming, etc.), the system automatically fetches split/lap data. This includes: +- Distance and timing per lap +- Heart rate (avg/max) per lap +- Speed and pace metrics +- Elevation changes +- GPS coordinates (start/end) +- Cadence (for running/walking) + +**Backfilling splits for existing activities:** +```bash +# Backfill splits for cardio activities that don't have them +garmy-sync backfill-splits --limit 100 + +# Check sync status +garmy-sync status +``` + +**Supported cardio activity types:** +- Running: `running`, `treadmill_running`, `trail_running`, `track_running` +- Cycling: `cycling`, `indoor_cycling`, `virtual_ride`, `gravel_cycling`, `road_cycling` +- Walking/Hiking: `walking`, `hiking` +- Swimming: `swimming`, `lap_swimming`, `open_water_swimming` +- Other: `elliptical`, `stair_climbing`, `rowing`, `indoor_rowing` + ### Selective Metric Sync ```python from garmy.localdb.models import MetricType diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 6031413..9d1f581 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -291,6 +291,55 @@ def cmd_backfill(args) -> int: return 1 +def cmd_backfill_splits(args) -> int: + """Backfill splits for cardio activities.""" + try: + # Setup progress reporter + progress_reporter = 
ProgressReporter(use_tqdm=args.progress == 'tqdm') + + # Initialize sync manager + config = LocalDBConfig() + manager = SyncManager( + db_path=args.db_path, + config=config, + progress_reporter=progress_reporter + ) + + # Try to initialize with saved tokens first + print("Connecting to Garmin Connect...") + try: + manager.initialize() + print("Using saved authentication tokens") + except RuntimeError: + # No valid tokens, prompt for credentials + email, password = get_credentials() + manager.initialize(email, password) + + print(f"\nBackfilling splits for cardio activities (limit: {args.limit})") + + # Execute backfill + stats = manager.backfill_activity_splits( + user_id=args.user_id, + limit=args.limit + ) + + # Print results + print(f"\nSplits backfill completed!") + print(f" Total activities: {stats['total']}") + print(f" Completed: {stats['completed']}") + print(f" Skipped: {stats['skipped']}") + print(f" Failed: {stats['failed']}") + + return 0 if stats['failed'] == 0 else 1 + + except KeyboardInterrupt: + print("\nBackfill interrupted by user") + return 130 + except Exception as e: + print(f"Error: {e}") + return 1 + + def create_parser() -> argparse.ArgumentParser: """Create command-line argument parser.""" parser = argparse.ArgumentParser( @@ -304,6 +353,7 @@ def create_parser() -> argparse.ArgumentParser: %(prog)s status # Show sync status %(prog)s reset --force # Reset failed records %(prog)s backfill --limit 50 # Backfill activity details + %(prog)s backfill-splits --limit 50 # Backfill splits for cardio """ ) @@ -351,6 +401,15 @@ def create_parser() -> argparse.ArgumentParser: default='tqdm', help='Progress display mode (default: tqdm)') + # Backfill splits command + backfill_splits_parser = subparsers.add_parser('backfill-splits', + help='Backfill splits/laps for cardio activities') + backfill_splits_parser.add_argument('--limit', type=int, default=100, + help='Maximum number of activities to process (default: 100)') + 
backfill_splits_parser.add_argument('--progress', choices=['tqdm', 'simple', 'silent'], + default='tqdm', + help='Progress display mode (default: tqdm)') + return parser @@ -372,6 +431,8 @@ def main() -> int: return cmd_reset(args) elif args.command == 'backfill': return cmd_backfill(args) + elif args.command == 'backfill-splits': + return cmd_backfill_splits(args) else: print(f"Unknown command: {args.command}") return 1 diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index 2937107..a76ef54 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -7,7 +7,7 @@ from sqlalchemy import create_engine, and_, text, inspect from sqlalchemy.orm import sessionmaker, Session -from .models import Base, TimeSeries, Activity, DailyHealthMetric, SyncStatus, MetricType, ExerciseSet +from .models import Base, TimeSeries, Activity, DailyHealthMetric, SyncStatus, MetricType, ExerciseSet, ActivitySplit if TYPE_CHECKING: from .config import DatabaseConfig @@ -97,7 +97,7 @@ def get_schema_info(self) -> Dict[str, Any]: def validate_schema(self) -> bool: """Validate database schema.""" try: - expected_tables = {'timeseries', 'activities', 'daily_health_metrics', 'sync_status', 'exercise_sets'} + expected_tables = {'timeseries', 'activities', 'daily_health_metrics', 'sync_status', 'exercise_sets', 'activity_splits'} actual_tables = set(Base.metadata.tables.keys()) return expected_tables.issubset(actual_tables) except Exception: @@ -469,4 +469,114 @@ def _exercise_set_to_dict(self, exercise_set: ExerciseSet) -> Dict[str, Any]: 'duration_seconds': exercise_set.duration_seconds, 'start_time': exercise_set.start_time, 'created_at': exercise_set.created_at + } + + def store_activity_splits(self, user_id: int, activity_id: str, splits: List[Dict[str, Any]]): + """Store lap/split data for an activity.""" + with self.get_session() as session: + for split_data in splits: + split = ActivitySplit( + user_id=user_id, + activity_id=activity_id, + 
lap_index=split_data.get('lap_index', 0), + start_time=split_data.get('start_time'), + duration_seconds=split_data.get('duration_seconds'), + moving_duration_seconds=split_data.get('moving_duration_seconds'), + distance_meters=split_data.get('distance_meters'), + avg_speed=split_data.get('avg_speed'), + max_speed=split_data.get('max_speed'), + avg_moving_speed=split_data.get('avg_moving_speed'), + avg_heart_rate=split_data.get('avg_heart_rate'), + max_heart_rate=split_data.get('max_heart_rate'), + elevation_gain=split_data.get('elevation_gain'), + elevation_loss=split_data.get('elevation_loss'), + max_elevation=split_data.get('max_elevation'), + min_elevation=split_data.get('min_elevation'), + avg_cadence=split_data.get('avg_cadence'), + max_cadence=split_data.get('max_cadence'), + calories=split_data.get('calories'), + start_latitude=split_data.get('start_latitude'), + start_longitude=split_data.get('start_longitude'), + end_latitude=split_data.get('end_latitude'), + end_longitude=split_data.get('end_longitude'), + intensity_type=split_data.get('intensity_type'), + ) + session.merge(split) + session.commit() + + def get_activity_splits(self, user_id: int, activity_id: str) -> List[Dict[str, Any]]: + """Get lap/split data for an activity.""" + with self.get_session() as session: + splits = session.query(ActivitySplit).filter( + and_( + ActivitySplit.user_id == user_id, + ActivitySplit.activity_id == activity_id + ) + ).order_by(ActivitySplit.lap_index).all() + return [self._split_to_dict(s) for s in splits] + + def get_all_activity_splits(self, user_id: int, start_date: date, end_date: date) -> List[Dict[str, Any]]: + """Get all splits for activities in date range.""" + with self.get_session() as session: + # Join with activities to filter by date + splits = session.query(ActivitySplit).join( + Activity, + and_( + ActivitySplit.user_id == Activity.user_id, + ActivitySplit.activity_id == Activity.activity_id + ) + ).filter( + and_( + ActivitySplit.user_id == 
user_id, + Activity.activity_date >= start_date, + Activity.activity_date <= end_date + ) + ).order_by(Activity.activity_date, ActivitySplit.lap_index).all() + return [self._split_to_dict(s) for s in splits] + + def activity_has_splits(self, user_id: int, activity_id: str) -> bool: + """Check if activity already has splits stored.""" + with self.get_session() as session: + return session.query(ActivitySplit).filter( + and_( + ActivitySplit.user_id == user_id, + ActivitySplit.activity_id == activity_id + ) + ).first() is not None + + def _split_to_dict(self, split: ActivitySplit) -> Dict[str, Any]: + """Convert ActivitySplit to dictionary.""" + # Calculate pace in min/km if we have distance and duration + pace_min_km = None + if split.distance_meters and split.duration_seconds and split.distance_meters > 0: + pace_min_km = (split.duration_seconds / 60) / (split.distance_meters / 1000) + + return { + 'user_id': split.user_id, + 'activity_id': split.activity_id, + 'lap_index': split.lap_index, + 'start_time': split.start_time, + 'duration_seconds': split.duration_seconds, + 'moving_duration_seconds': split.moving_duration_seconds, + 'distance_meters': split.distance_meters, + 'distance_km': split.distance_meters / 1000 if split.distance_meters else None, + 'avg_speed': split.avg_speed, + 'max_speed': split.max_speed, + 'avg_moving_speed': split.avg_moving_speed, + 'pace_min_km': pace_min_km, + 'avg_heart_rate': split.avg_heart_rate, + 'max_heart_rate': split.max_heart_rate, + 'elevation_gain': split.elevation_gain, + 'elevation_loss': split.elevation_loss, + 'max_elevation': split.max_elevation, + 'min_elevation': split.min_elevation, + 'avg_cadence': split.avg_cadence, + 'max_cadence': split.max_cadence, + 'calories': split.calories, + 'start_latitude': split.start_latitude, + 'start_longitude': split.start_longitude, + 'end_latitude': split.end_latitude, + 'end_longitude': split.end_longitude, + 'intensity_type': split.intensity_type, + 'created_at': 
split.created_at } \ No newline at end of file diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 2976c60..26f9848 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -379,4 +379,82 @@ def calculate_strength_summary(self, sets: List[Dict[str, Any]]) -> Dict[str, An 'total_sets': len(active_sets), 'total_reps': total_reps, 'total_weight_kg': total_volume_grams / 1000 if total_volume_grams else 0 + } + + def extract_activity_splits(self, data: Dict, activity_id: str) -> List[Dict[str, Any]]: + """Extract lap/split data from splits API response. + + Args: + data: Raw API response from /activity-service/activity/{id}/splits + activity_id: The activity ID these splits belong to + + Returns: + List of dicts with normalized split/lap fields. + """ + if not data: + return [] + + splits = [] + lap_dtos = data.get('lapDTOs', []) + + for lap in lap_dtos: + splits.append({ + 'lap_index': lap.get('lapIndex', 0), + 'start_time': lap.get('startTimeGMT'), + 'duration_seconds': lap.get('duration'), + 'moving_duration_seconds': lap.get('movingDuration'), + 'distance_meters': lap.get('distance'), + 'avg_speed': lap.get('averageSpeed'), + 'max_speed': lap.get('maxSpeed'), + 'avg_moving_speed': lap.get('averageMovingSpeed'), + 'avg_heart_rate': int(lap.get('averageHR')) if lap.get('averageHR') else None, + 'max_heart_rate': int(lap.get('maxHR')) if lap.get('maxHR') else None, + 'elevation_gain': lap.get('elevationGain'), + 'elevation_loss': lap.get('elevationLoss'), + 'max_elevation': lap.get('maxElevation'), + 'min_elevation': lap.get('minElevation'), + 'avg_cadence': lap.get('averageRunCadence'), + 'max_cadence': lap.get('maxRunCadence'), + 'calories': lap.get('calories'), + 'start_latitude': lap.get('startLatitude'), + 'start_longitude': lap.get('startLongitude'), + 'end_latitude': lap.get('endLatitude'), + 'end_longitude': lap.get('endLongitude'), + 'intensity_type': lap.get('intensityType'), + }) + + return splits 
+ + def calculate_splits_summary(self, splits: List[Dict[str, Any]]) -> Dict[str, Any]: + """Calculate activity summary from splits data. + + Args: + splits: List of split dicts from extract_activity_splits + + Returns: + Dict with total_laps and aggregated metrics + """ + active_splits = [s for s in splits if s.get('intensity_type') == 'ACTIVE'] + + if not active_splits: + return {'total_laps': len(splits)} + + total_distance = sum(s.get('distance_meters', 0) or 0 for s in active_splits) + total_duration = sum(s.get('duration_seconds', 0) or 0 for s in active_splits) + total_elevation_gain = sum(s.get('elevation_gain', 0) or 0 for s in active_splits) + total_calories = sum(s.get('calories', 0) or 0 for s in active_splits) + + # Calculate average pace (min/km) if we have distance + avg_pace_min_km = None + if total_distance > 0 and total_duration > 0: + # pace = time / distance, convert to min/km + avg_pace_min_km = (total_duration / 60) / (total_distance / 1000) + + return { + 'total_laps': len(active_splits), + 'total_distance_meters': total_distance, + 'total_duration_seconds': total_duration, + 'total_elevation_gain': total_elevation_gain, + 'total_calories': total_calories, + 'avg_pace_min_km': avg_pace_min_km } \ No newline at end of file diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index d594cc5..489c639 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -89,6 +89,54 @@ class ExerciseSet(Base): created_at = Column(DateTime, default=datetime.utcnow) +class ActivitySplit(Base): + """Lap/split data from cardio activities (running, cycling, walking, etc.).""" + __tablename__ = "activity_splits" + + user_id = Column(Integer, primary_key=True, nullable=False) + activity_id = Column(String, primary_key=True, nullable=False) + lap_index = Column(Integer, primary_key=True, nullable=False) # 1-indexed lap number + + # Timing + start_time = Column(String) # ISO timestamp + duration_seconds = Column(Float) + 
moving_duration_seconds = Column(Float) + + # Distance and speed + distance_meters = Column(Float) + avg_speed = Column(Float) # m/s + max_speed = Column(Float) # m/s + avg_moving_speed = Column(Float) # m/s + + # Heart rate + avg_heart_rate = Column(Integer) + max_heart_rate = Column(Integer) + + # Elevation + elevation_gain = Column(Float) # meters + elevation_loss = Column(Float) # meters + max_elevation = Column(Float) # meters + min_elevation = Column(Float) # meters + + # Cadence (running/walking) + avg_cadence = Column(Float) # steps per minute + max_cadence = Column(Float) # steps per minute + + # Calories + calories = Column(Float) + + # GPS coordinates + start_latitude = Column(Float) + start_longitude = Column(Float) + end_latitude = Column(Float) + end_longitude = Column(Float) + + # Type + intensity_type = Column(String) # ACTIVE, REST + + created_at = Column(DateTime, default=datetime.utcnow) + + class DailyHealthMetric(Base): """Normalized daily health metrics with dedicated columns for efficient querying.""" __tablename__ = "daily_health_metrics" diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index 7b1173b..bd5cb74 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -233,27 +233,44 @@ def _sync_activities_for_date(self, user_id: int, sync_date: date, stats: Dict[s self.progress.task_failed("activities", sync_date) stats['failed'] += 1 + # Activity types for fetching specific detail data + STRENGTH_TYPES = ['strength_training', 'indoor_strength_training'] + CARDIO_TYPES = [ + 'running', 'treadmill_running', 'trail_running', 'track_running', + 'cycling', 'indoor_cycling', 'virtual_ride', 'gravel_cycling', 'road_cycling', + 'walking', 'hiking', 'swimming', 'lap_swimming', 'open_water_swimming', + 'elliptical', 'stair_climbing', 'rowing', 'indoor_rowing' + ] + def _sync_activity_details(self, user_id: int, activity_id: str, activity_type: str = None): """Sync detailed data for a single activity. 
For strength training activities, fetches exercise sets (reps, weight, etc.). + For cardio activities, fetches lap/split data. Basic activity details (distance, calories, etc.) are already extracted from the activity list API response during the initial sync. Args: user_id: User identifier activity_id: Activity ID to fetch details for - activity_type: Activity type key (e.g., 'strength_training') + activity_type: Activity type key (e.g., 'strength_training', 'running') """ try: - # Only fetch exercise sets for strength training activities - strength_types = ['strength_training', 'indoor_strength_training'] + activities_accessor = self.api_client.metrics.get('activities') + api_called = False - if activity_type and activity_type in strength_types: - activities_accessor = self.api_client.metrics.get('activities') + # Fetch exercise sets for strength training activities + if activity_type and activity_type in self.STRENGTH_TYPES: self._sync_exercise_sets(user_id, activity_id, activities_accessor) + api_called = True + + # Fetch splits/laps for cardio activities + if activity_type and activity_type in self.CARDIO_TYPES: + self._sync_activity_splits(user_id, activity_id, activities_accessor) + api_called = True - # Apply rate limiting delay after API call + # Apply rate limiting delay after API calls + if api_called: import time time.sleep(self.config.sync.rate_limit_delay) @@ -285,6 +302,28 @@ def _sync_exercise_sets(self, user_id: int, activity_id: str, activities_accesso except Exception as e: self.progress.warning(f"Failed to sync exercise sets for activity {activity_id}: {e}") + def _sync_activity_splits(self, user_id: int, activity_id: str, activities_accessor): + """Sync lap/split data for a cardio activity. 
+ + Args: + user_id: User identifier + activity_id: Activity ID to fetch splits for + activities_accessor: The activities API accessor + """ + try: + # Skip if already has splits + if self.db.activity_has_splits(user_id, activity_id): + return + + splits_data = activities_accessor.get_activity_splits(activity_id) + if splits_data: + splits = self.extractor.extract_activity_splits(splits_data, activity_id) + if splits: + self.db.store_activity_splits(user_id, activity_id, splits) + + except Exception as e: + self.progress.warning(f"Failed to sync splits for activity {activity_id}: {e}") + def backfill_activity_details(self, user_id: int, limit: int = 100) -> Dict[str, int]: """Backfill detailed data for activities that don't have details synced. @@ -318,6 +357,80 @@ def backfill_activity_details(self, user_id: int, limit: int = 100) -> Dict[str, self.progress.info(f"Backfill complete: {stats['completed']} succeeded, {stats['failed']} failed") return stats + def backfill_activity_splits(self, user_id: int, limit: int = 100) -> Dict[str, int]: + """Backfill splits for cardio activities that don't have splits yet. + + This is useful for activities that were synced before the splits feature + was added, or when activities have details_synced=True but no splits. 
+ + Args: + user_id: User identifier + limit: Maximum number of activities to process + + Returns: + Dict with sync statistics + """ + if not self.api_client: + raise RuntimeError("Must call initialize() before backfilling") + + stats = {'completed': 0, 'skipped': 0, 'failed': 0, 'total': 0} + + # Get cardio activities that don't have splits + activities = self._get_cardio_activities_without_splits(user_id, limit) + stats['total'] = len(activities) + + self.progress.info(f"Backfilling splits for {len(activities)} cardio activities") + + activities_accessor = self.api_client.metrics.get('activities') + + for activity in activities: + activity_id = activity['activity_id'] + activity_type = activity.get('activity_type') + + # Skip if not a cardio type + if activity_type not in self.CARDIO_TYPES: + stats['skipped'] += 1 + continue + + try: + self._sync_activity_splits(user_id, str(activity_id), activities_accessor) + stats['completed'] += 1 + + # Rate limiting + import time + time.sleep(self.config.sync.rate_limit_delay) + + except Exception as e: + self.progress.warning(f"Failed to backfill splits for activity {activity_id}: {e}") + stats['failed'] += 1 + + self.progress.info(f"Splits backfill complete: {stats['completed']} succeeded, {stats['skipped']} skipped, {stats['failed']} failed") + return stats + + def _get_cardio_activities_without_splits(self, user_id: int, limit: int) -> List[Dict[str, Any]]: + """Get cardio activities that don't have splits stored yet.""" + with self.db.get_session() as session: + from .models import Activity, ActivitySplit + from sqlalchemy import and_, not_, exists + + # Subquery to find activities with splits + has_splits = exists().where( + and_( + ActivitySplit.user_id == Activity.user_id, + ActivitySplit.activity_id == Activity.activity_id + ) + ) + + activities = session.query(Activity).filter( + and_( + Activity.user_id == user_id, + Activity.activity_type.in_(self.CARDIO_TYPES), + ~has_splits + ) + 
).order_by(Activity.activity_date.desc()).limit(limit).all() + + return [self.db._activity_to_dict(a) for a in activities] + def _store_health_metric(self, user_id: int, sync_date: date, metric_type: MetricType, data: Dict): """Store health metric data in normalized table.""" if metric_type == MetricType.DAILY_SUMMARY: From 393f435b19a6a19e919c697f4d94665e7d33135f Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Wed, 3 Dec 2025 11:22:17 -0800 Subject: [PATCH 08/26] Add body composition sync and enhanced sleep data fields Implement new health data types for expanded tracking: Body Composition: - New BodyComposition model with weight, BMI, body fat %, body water %, bone mass, muscle mass, visceral fat, metabolic age from smart scales - Add body_composition metric module with dataclasses and parser - Batch sync via /weight-service/weight/range endpoint - BODY_COMPOSITION enum value in MetricType Enhanced Sleep Data: - Add sleep_score (0-100) and sleep_score_qualifier (POOR/FAIR/GOOD/EXCELLENT) - Add sleep_bedtime and sleep_wake_time as ISO timestamp strings - Add sleep_need_minutes for target sleep duration - Add skin_temp_deviation_c from overnight skin temperature sensor - Custom parse_sleep_data() to capture top-level API fields Database: - Schema migration adds new columns to daily_health_metrics - Body composition CRUD operations with existence checking - Updated _metric_to_dict() with computed Fahrenheit conversion Sync Manager: - Body composition uses single batch API call for date range - Proper separation of batch vs date-by-date metric syncing --- examples/health_db_demo.py | 289 ++++--- examples/mcp_server_example.py | 36 +- examples/schema_demo.py | 65 +- examples/sleep_phases_analysis.py | 16 +- src/garmy/localdb/__init__.py | 6 +- src/garmy/localdb/__main__.py | 4 +- src/garmy/localdb/activities_iterator.py | 79 +- src/garmy/localdb/cli.py | 322 ++++---- src/garmy/localdb/config.py | 22 +- src/garmy/localdb/db.py | 936 +++++++++++++++-------- 
src/garmy/localdb/extractors.py | 551 ++++++++----- src/garmy/localdb/models.py | 71 +- src/garmy/localdb/progress.py | 5 +- src/garmy/localdb/sync.py | 440 ++++++++--- src/garmy/mcp/__init__.py | 3 +- src/garmy/mcp/__main__.py | 2 +- src/garmy/mcp/cli.py | 215 +++--- src/garmy/mcp/config.py | 20 +- src/garmy/mcp/server.py | 244 +++--- src/garmy/metrics/activities.py | 3 + src/garmy/metrics/body_composition.py | 231 ++++++ src/garmy/metrics/sleep.py | 55 +- tests/test_auth_init.py | 12 +- tests/test_core_http_client.py | 8 +- tests/test_metrics_comprehensive.py | 6 +- tests/test_metrics_remaining.py | 42 +- 26 files changed, 2409 insertions(+), 1274 deletions(-) create mode 100644 src/garmy/metrics/body_composition.py diff --git a/examples/health_db_demo.py b/examples/health_db_demo.py index 3edab53..ca71c4f 100644 --- a/examples/health_db_demo.py +++ b/examples/health_db_demo.py @@ -26,123 +26,132 @@ # Add project root to path sys.path.insert(0, str(Path(__file__).parent.parent)) -from src.garmy.localdb.sync import SyncManager from src.garmy.localdb.config import LocalDBConfig -from src.garmy.localdb.progress import create_reporter, MultiReporter +from src.garmy.localdb.progress import MultiReporter, create_reporter +from src.garmy.localdb.sync import SyncManager class HealthDBDemo: """Comprehensive demo of the health database system.""" - + def __init__(self): self.db_path = Path("health_demo.db") self.user_id = 1 self.sync_manager = None - + async def run_complete_demo(self): """Run the complete demonstration.""" print("🏥 Garmin Health Database System Demo") print("=" * 50) - + # Get credentials - email = os.getenv('GARMIN_EMAIL') - password = os.getenv('GARMIN_PASSWORD') - + email = os.getenv("GARMIN_EMAIL") + password = os.getenv("GARMIN_PASSWORD") + if not email or not password: - print("❌ Please set GARMIN_EMAIL and GARMIN_PASSWORD environment variables") + print( + "❌ Please set GARMIN_EMAIL and GARMIN_PASSWORD environment variables" + ) return - + try: 
await self._demo_progress_types() await self._demo_sync_and_analytics() await self._demo_data_export() await self._demo_advanced_queries() self._cleanup() - + except Exception as e: print(f"❌ Demo failed: {e}") import traceback + traceback.print_exc() - + async def _demo_progress_types(self): """Demo different progress reporting styles.""" print("\n📊 Progress Reporting Demo") print("-" * 30) - - email = os.getenv('GARMIN_EMAIL') - password = os.getenv('GARMIN_PASSWORD') - + + email = os.getenv("GARMIN_EMAIL") + password = os.getenv("GARMIN_PASSWORD") + # Demo period (small for quick demo) end_date = date.today() start_date = end_date - timedelta(days=2) - + # 1. Rich progress (if available) try: print("🎨 Rich Progress (beautiful terminal UI):") - rich_reporter = create_reporter("rich", name="Health Sync", show_stats_table=True) - + rich_reporter = create_reporter( + "rich", name="Health Sync", show_stats_table=True + ) + config = LocalDBConfig() sync_manager = SyncManager( db_path=Path("demo_rich.db"), config=config, - progress_reporter=rich_reporter + progress_reporter=rich_reporter, ) - + await sync_manager.initialize(email, password) await sync_manager.sync_range(self.user_id, start_date, end_date) print("✅ Rich demo completed\n") - + except ImportError: print("⚠️ Rich not available (install: pip install rich)\n") - + # 2. TQDM progress bar try: print("📊 TQDM Progress Bar:") - tqdm_reporter = create_reporter("tqdm", name="Health Sync", show_details=True) - + tqdm_reporter = create_reporter( + "tqdm", name="Health Sync", show_details=True + ) + config = LocalDBConfig() sync_manager = SyncManager( db_path=Path("demo_tqdm.db"), config=config, - progress_reporter=tqdm_reporter + progress_reporter=tqdm_reporter, ) - + await sync_manager.initialize(email, password) await sync_manager.sync_range(self.user_id, start_date, end_date) print("✅ TQDM demo completed\n") - + except ImportError: print("⚠️ TQDM not available (install: pip install tqdm)\n") - + # 3. 
Combined reporting print("🔄 Combined Progress (Logging + JSON):") multi_reporter = MultiReporter("Combined Sync") multi_reporter.add_reporter(create_reporter("logging", name="Health Sync")) - multi_reporter.add_reporter(create_reporter("json", output_file="sync_report.json", real_time=False)) - + multi_reporter.add_reporter( + create_reporter("json", output_file="sync_report.json", real_time=False) + ) + config = LocalDBConfig() sync_manager = SyncManager( db_path=Path("demo_combined.db"), config=config, - progress_reporter=multi_reporter + progress_reporter=multi_reporter, ) - + await sync_manager.initialize(email, password) await sync_manager.sync_range(self.user_id, start_date, end_date) print("✅ Combined demo completed (see sync_report.json)\n") - + async def _demo_sync_and_analytics(self): """Demo main synchronization and analytics.""" print("\n💚 Health Data Synchronization & Analytics") print("-" * 45) - + # Clean start if self.db_path.exists(): self.db_path.unlink() - + # Setup with automatic progress selection config = LocalDBConfig() - + try: progress_reporter = create_reporter("rich", name="Health Analytics") print("🎨 Using Rich progress display") @@ -153,52 +162,57 @@ async def _demo_sync_and_analytics(self): except ImportError: progress_reporter = create_reporter("logging", name="Health Analytics") print("📝 Using logging progress display") - + self.sync_manager = SyncManager( - db_path=self.db_path, - config=config, - progress_reporter=progress_reporter + db_path=self.db_path, config=config, progress_reporter=progress_reporter ) - + # Initialize - email = os.getenv('GARMIN_EMAIL') - password = os.getenv('GARMIN_PASSWORD') + email = os.getenv("GARMIN_EMAIL") + password = os.getenv("GARMIN_PASSWORD") await self.sync_manager.initialize(email, password) - + # Sync recent data end_date = date.today() start_date = end_date - timedelta(days=7) - + print(f"\n📅 Syncing health data: {start_date} to {end_date}") stats = await 
self.sync_manager.sync_range(self.user_id, start_date, end_date) - + print(f"\n📊 Sync Results:") print(f" ✅ Success: {stats['completed']}") print(f" ⏭️ Skipped: {stats['skipped']}") print(f" ❌ Failed: {stats['failed']}") print(f" 📈 Total: {stats['total_tasks']}") - + # Simple database statistics using direct SQL with self.sync_manager.db.connection() as conn: - health_count = conn.execute("SELECT COUNT(*) FROM daily_health_metrics").fetchone()[0] - activities_count = conn.execute("SELECT COUNT(*) FROM activities").fetchone()[0] - timeseries_count = conn.execute("SELECT COUNT(*) FROM timeseries").fetchone()[0] - + health_count = conn.execute( + "SELECT COUNT(*) FROM daily_health_metrics" + ).fetchone()[0] + activities_count = conn.execute( + "SELECT COUNT(*) FROM activities" + ).fetchone()[0] + timeseries_count = conn.execute( + "SELECT COUNT(*) FROM timeseries" + ).fetchone()[0] + print(f"\n🏗️ Database Statistics:") print(f" 📋 Health metrics: {health_count}") print(f" 🏃‍♂️ Activities: {activities_count}") print(f" 📊 Timeseries points: {timeseries_count}") - + # Show simple analytics using direct SQL await self._show_simple_analytics(start_date, end_date) - + async def _show_simple_analytics(self, start_date: date, end_date: date): """Show simple analytics using direct SQL queries.""" print(f"\n📊 Simple Analytics (Direct SQL)") - + with self.sync_manager.db.connection() as conn: # Health trends - trends = conn.execute(""" + trends = conn.execute( + """ SELECT AVG(total_steps) as avg_daily_steps, AVG(resting_heart_rate) as avg_resting_hr, @@ -206,88 +220,117 @@ async def _show_simple_analytics(self, start_date: date, end_date: date): COUNT(CASE WHEN total_steps > 10000 THEN 1 END) as days_over_10k_steps FROM daily_health_metrics WHERE user_id = ? AND metric_date BETWEEN ? AND ? 
- """, (self.user_id, start_date.isoformat(), end_date.isoformat())).fetchone() - + """, + (self.user_id, start_date.isoformat(), end_date.isoformat()), + ).fetchone() + if trends and trends[0]: print(f" 👟 Average daily steps: {trends[0]:,.0f}") - print(f" ❤️ Average resting HR: {trends[1]:.0f} bpm" if trends[1] else " ❤️ No HR data") - print(f" 😴 Average sleep: {trends[2]:.1f} hours" if trends[2] else " 😴 No sleep data") + print( + f" ❤️ Average resting HR: {trends[1]:.0f} bpm" + if trends[1] + else " ❤️ No HR data" + ) + print( + f" 😴 Average sleep: {trends[2]:.1f} hours" + if trends[2] + else " 😴 No sleep data" + ) print(f" 🎯 Days >10k steps: {trends[3]}") - + # Activities summary - activities = conn.execute(""" + activities = conn.execute( + """ SELECT COUNT(*) as total_activities, COUNT(DISTINCT activity_name) as activity_types FROM activities WHERE user_id = ? AND activity_date BETWEEN ? AND ? - """, (self.user_id, start_date.isoformat(), end_date.isoformat())).fetchone() - + """, + (self.user_id, start_date.isoformat(), end_date.isoformat()), + ).fetchone() + if activities and activities[0] > 0: print(f"\n🏃‍♂️ Activities:") print(f" 📈 Total activities: {activities[0]}") print(f" 🎯 Activity types: {activities[1]}") else: print(f"\n🏃‍♂️ No activities found in this period") - + async def _demo_data_export(self): """Demo data export capabilities.""" print(f"\n📤 Data Export Demo") print("-" * 20) - + if not self.sync_manager: print("⚠️ No sync manager available for export demo") return - + end_date = date.today() start_date = end_date - timedelta(days=7) - + # Export health metrics - health_data = self.sync_manager.query_health_metrics(self.user_id, start_date, end_date) + health_data = self.sync_manager.query_health_metrics( + self.user_id, start_date, end_date + ) if health_data: # Save to JSON export_file = "health_export.json" - with open(export_file, 'w') as f: + with open(export_file, "w") as f: json.dump(health_data, f, indent=2, default=str) - print(f"✅ 
Health metrics exported to {export_file} ({len(health_data)} records)") - + print( + f"✅ Health metrics exported to {export_file} ({len(health_data)} records)" + ) + # Export activities - activities = self.sync_manager.query_activities(self.user_id, start_date, end_date) + activities = self.sync_manager.query_activities( + self.user_id, start_date, end_date + ) if activities: activities_file = "activities_export.json" - with open(activities_file, 'w') as f: + with open(activities_file, "w") as f: json.dump(activities, f, indent=2, default=str) - print(f"✅ Activities exported to {activities_file} ({len(activities)} records)") - + print( + f"✅ Activities exported to {activities_file} ({len(activities)} records)" + ) + # Export timeseries (last day only) if health_data: - from src.garmy.localdb.models import MetricType from datetime import datetime - - last_date = datetime.strptime(health_data[-1]['metric_date'], '%Y-%m-%d').date() + + from src.garmy.localdb.models import MetricType + + last_date = datetime.strptime( + health_data[-1]["metric_date"], "%Y-%m-%d" + ).date() start_time = datetime.combine(last_date, datetime.min.time()) end_time = start_time + timedelta(days=1) - - hr_data = self.sync_manager.query_timeseries(self.user_id, MetricType.HEART_RATE, start_time, end_time) + + hr_data = self.sync_manager.query_timeseries( + self.user_id, MetricType.HEART_RATE, start_time, end_time + ) if hr_data: hr_file = "heart_rate_timeseries.json" - with open(hr_file, 'w') as f: + with open(hr_file, "w") as f: json.dump(hr_data, f, indent=2, default=str) - print(f"✅ Heart rate timeseries exported to {hr_file} ({len(hr_data)} points)") - + print( + f"✅ Heart rate timeseries exported to {hr_file} ({len(hr_data)} points)" + ) + async def _demo_advanced_queries(self): """Demo advanced SQL queries.""" print(f"\n🔍 Advanced Health Analytics") print("-" * 35) - + if not self.sync_manager: print("⚠️ No sync manager available for queries demo") return - + # Direct SQL queries for 
advanced analytics with self.sync_manager.db.connection() as conn: - + # 1. Sleep quality vs training readiness correlation print("📊 Sleep Quality vs Training Readiness:") - correlation = conn.execute(""" + correlation = conn.execute( + """ SELECT CASE WHEN sleep_duration_hours >= 8 THEN 'Good Sleep (8+ hrs)' @@ -301,14 +344,17 @@ async def _demo_advanced_queries(self): AND training_readiness_score IS NOT NULL GROUP BY 1 ORDER BY avg_readiness DESC - """, (self.user_id,)).fetchall() - + """, + (self.user_id,), + ).fetchall() + for row in correlation: print(f" {row[0]}: Readiness {row[1]:.0f}, {row[2]} days") - + # 2. Activity patterns by day of week print(f"\n📅 Activity Patterns by Day of Week:") - weekly_pattern = conn.execute(""" + weekly_pattern = conn.execute( + """ SELECT CASE strftime('%w', activity_date) WHEN '0' THEN 'Sunday' @@ -325,14 +371,17 @@ async def _demo_advanced_queries(self): WHERE user_id = ? GROUP BY strftime('%w', activity_date) ORDER BY strftime('%w', activity_date) - """, (self.user_id,)).fetchall() - + """, + (self.user_id,), + ).fetchall() + for row in weekly_pattern: print(f" {row[0]}: {row[1]} activities, {row[2]:.0f} min avg") - + # 3. Most active days print(f"\n🏆 Most Active Days:") - active_days = conn.execute(""" + active_days = conn.execute( + """ SELECT metric_date, total_steps, @@ -342,14 +391,17 @@ async def _demo_advanced_queries(self): WHERE user_id = ? AND total_steps IS NOT NULL ORDER BY total_steps DESC LIMIT 5 - """, (self.user_id,)).fetchall() - + """, + (self.user_id,), + ).fetchall() + for row in active_days: print(f" 📅 {row[0]}: {row[1]:,} steps, {row[2]} activities") - + # 4. 
Recovery analysis print(f"\n🔋 Recovery Analysis (Body Battery vs Stress):") - recovery = conn.execute(""" + recovery = conn.execute( + """ SELECT metric_date, body_battery_high, @@ -362,23 +414,30 @@ async def _demo_advanced_queries(self): AND avg_stress_level IS NOT NULL ORDER BY battery_recovery DESC LIMIT 5 - """, (self.user_id,)).fetchall() - + """, + (self.user_id,), + ).fetchall() + for row in recovery: print(f" 📅 {row[0]}: 🔋 Recovery {row[3]}, 😰 Stress {row[4]}") - + def _cleanup(self): """Clean up demo files.""" print(f"\n🧹 Cleanup") print("-" * 10) - + # Show file sizes demo_files = [ - "health_demo.db", "demo_rich.db", "demo_tqdm.db", "demo_combined.db", - "health_export.json", "activities_export.json", "heart_rate_timeseries.json", - "sync_report.json" + "health_demo.db", + "demo_rich.db", + "demo_tqdm.db", + "demo_combined.db", + "health_export.json", + "activities_export.json", + "heart_rate_timeseries.json", + "sync_report.json", ] - + print("📁 Generated files:") total_size = 0 for file_path in demo_files: @@ -387,12 +446,12 @@ def _cleanup(self): size_kb = path.stat().st_size / 1024 total_size += size_kb print(f" 📄 {file_path}: {size_kb:.1f} KB") - + print(f" 📊 Total size: {total_size:.1f} KB") - + # Option to clean up response = input("\n🗑️ Delete demo files? 
(y/N): ").lower().strip() - if response == 'y': + if response == "y": for file_path in demo_files: path = Path(file_path) if path.exists(): @@ -406,11 +465,11 @@ async def main(): """Main demo function.""" demo = HealthDBDemo() await demo.run_complete_demo() - + print(f"\n🎉 Demo completed!") print(f"\n💡 Next steps:") print(f" • Explore the generated files") - print(f" • Check out other examples in the examples/ directory") + print(f" • Check out other examples in the examples/ directory") print(f" • Read PROGRESS_SYSTEM.md for progress customization") print(f" • Integrate the health DB into your own projects") @@ -421,5 +480,5 @@ async def main(): print("📦 Optional dependencies for better progress display:") print(" pip install rich tqdm") print() - - asyncio.run(main()) \ No newline at end of file + + asyncio.run(main()) diff --git a/examples/mcp_server_example.py b/examples/mcp_server_example.py index 88bf6b9..394bdbe 100644 --- a/examples/mcp_server_example.py +++ b/examples/mcp_server_example.py @@ -17,30 +17,32 @@ def main(): """Demonstrate MCP server configuration and creation.""" - + # Example 1: Create config from database path db_path = Path("health.db") - + # Check if database exists (for demo purposes) if not db_path.exists(): - print(f"Database {db_path} not found. Please run garmy-sync first to create health data.") + print( + f"Database {db_path} not found. Please run garmy-sync first to create health data." 
+ ) print("Example: garmy-sync sync --last-days 7") return - + # Create custom configuration config = MCPConfig.from_db_path( db_path=db_path, max_rows=500, # Limit to 500 rows per query enable_query_logging=True, # Enable query logging for debugging - strict_validation=True # Enable strict SQL validation + strict_validation=True, # Enable strict SQL validation ) - + print("MCP Server Configuration:") print(f" Database: {config.db_path}") print(f" Max rows per query: {config.max_rows}") print(f" Query logging: {config.enable_query_logging}") print(f" Strict validation: {config.strict_validation}") - + # Validate configuration try: config.validate() @@ -48,11 +50,11 @@ def main(): except Exception as e: print(f"❌ Configuration error: {e}") return - + # Create MCP server with custom config print("\\nCreating MCP server...") mcp_server = create_mcp_server(config) - + print(f"✅ MCP server created: {mcp_server.name}") print("\\nAvailable tools:") print(" 📊 explore_database_structure() - Start here to see available data") @@ -61,11 +63,13 @@ def main(): print(" 📋 get_health_summary(user_id, days) - Quick health overview") print("\\nAvailable resources:") print(" 📚 health_data_guide() - Complete usage guide") - + print("\\n🚀 To start the server, run:") print(f" garmy-mcp server --database {db_path}") print("\\n📋 With custom configuration:") - print(f" garmy-mcp server --database {db_path} --max-rows 500 --enable-query-logging") + print( + f" garmy-mcp server --database {db_path} --max-rows 500 --enable-query-logging" + ) print("\\n🔧 Or use environment variable:") print(f" export GARMY_DB_PATH={db_path}") print(" garmy-mcp server --max-rows 200 --verbose") @@ -73,15 +77,15 @@ def main(): print(f" garmy-mcp info --database {db_path}") print("\\n📋 Show configuration examples:") print(" garmy-mcp config") - + # Example 2: Environment-based configuration (backwards compatibility) - print("\\n" + "="*50) + print("\\n" + "=" * 50) print("Environment-based configuration 
example:") - - os.environ['GARMY_DB_PATH'] = str(db_path) + + os.environ["GARMY_DB_PATH"] = str(db_path) env_server = create_mcp_server() # Uses environment variable print(f"✅ Environment-based server created: {env_server.name}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/schema_demo.py b/examples/schema_demo.py index 4bf350b..0ce380d 100644 --- a/examples/schema_demo.py +++ b/examples/schema_demo.py @@ -3,7 +3,7 @@ Demo of the new database schema architecture. This script demonstrates: -- Clean separation of schema definition from database logic +- Clean separation of schema definition from database logic - Schema validation and introspection - Centralized schema management - Easy schema evolution and migration planning @@ -15,29 +15,29 @@ # Add project root to path sys.path.insert(0, str(Path(__file__).parent.parent)) +from src.garmy.localdb.db import HealthDB from src.garmy.localdb.schema import ( - HEALTH_DB_SCHEMA, - get_schema_info, + HEALTH_DB_SCHEMA, + SchemaVersion, + get_schema_info, get_table_names, - SchemaVersion ) -from src.garmy.localdb.db import HealthDB def demo_schema_info(): """Demo schema introspection capabilities.""" print("🗄️ Database Schema Information") print("=" * 50) - + schema_info = get_schema_info() - + print(f"📊 Schema Version: {schema_info['version']}") print(f"📋 Total Tables: {schema_info['total_tables']}") print(f"🔍 Total Indexes: {schema_info['total_indexes']}") print() - + print("📁 Tables:") - for table_name, info in schema_info['tables'].items(): + for table_name, info in schema_info["tables"].items(): print(f" • {table_name}") print(f" Description: {info['description']}") print(f" Primary Key: {', '.join(info['primary_key'])}") @@ -49,17 +49,17 @@ def demo_schema_definition(): """Demo clean schema definition structure.""" print("\n🏗️ Schema Definition Structure") print("=" * 40) - + print(f"Schema contains {len(HEALTH_DB_SCHEMA.tables)} tables:") - + for table in 
HEALTH_DB_SCHEMA.tables: print(f"\n📋 {table.name.upper()}") print(f" Purpose: {table.description}") print(f" Primary Key: [{', '.join(table.primary_key)}]") print(f" Indexes: {len(table.indexes)} performance indexes") - + # Show table SQL (first few lines) - sql_lines = table.sql.strip().split('\n') + sql_lines = table.sql.strip().split("\n") print(f" Schema Preview:") for i, line in enumerate(sql_lines[:4]): if line.strip(): @@ -72,13 +72,13 @@ def demo_data_extraction(): """Demo how sync process extracts data to database columns.""" print("\n🔄 Data Extraction Process") print("=" * 30) - + print("The sync process uses direct attribute access:") print() print("📊 Example extraction logic:") print(" API Response → Database Column") print(" data.total_steps → total_steps") - print(" data.resting_heart_rate → resting_heart_rate") + print(" data.resting_heart_rate → resting_heart_rate") print(" data.sleep_duration_hours → sleep_duration_hours") print(" data.training_readiness.score → training_readiness_score") print() @@ -93,45 +93,46 @@ def demo_database_integration(): """Demo how the schema integrates with the database.""" print("\n💾 Database Integration Demo") print("=" * 35) - + # Create temporary database for demo db_path = Path("schema_demo.db") db = HealthDB(db_path) - + print("✅ Database initialized with new schema architecture") - + # Validate schema is_valid = db.validate_schema() print(f"🔍 Schema validation: {'✅ PASSED' if is_valid else '❌ FAILED'}") - + # Show schema info from database db_schema_info = db.get_schema_info() print(f"📊 Schema version: {db_schema_info['version']}") print(f"📋 Tables created: {db_schema_info['total_tables']}") - + print("\n📁 Expected vs Created Tables:") expected_tables = set(get_table_names()) print(f" Expected: {', '.join(sorted(expected_tables))}") - + # Check actual tables in database with db.connection() as conn: actual_tables = { - row[0] for row in conn.execute( + row[0] + for row in conn.execute( "SELECT name FROM 
sqlite_master WHERE type='table'" ).fetchall() } print(f" Created: {', '.join(sorted(actual_tables))}") - + missing = expected_tables - actual_tables extra = actual_tables - expected_tables - + if missing: print(f" ❌ Missing: {', '.join(missing)}") if extra: print(f" ➕ Extra: {', '.join(extra)}") if not missing and not extra: print(" ✅ Perfect match!") - + # Clean up demo database if db_path.exists(): db_path.unlink() @@ -142,18 +143,18 @@ def demo_benefits(): """Demo the benefits of this architecture.""" print("\n🌟 Benefits of Centralized Schema Management") print("=" * 55) - + benefits = [ "🧹 Clean separation: Schema definition is separate from database logic", "📚 Documentation: Each table has clear description and purpose", - "🔍 Introspection: Easy to query schema info programmatically", + "🔍 Introspection: Easy to query schema info programmatically", "🚀 Evolution: Schema changes are centralized and trackable", "🔧 Validation: Can validate database matches expected schema", "📊 Mapping: Clear mapping from API data to database columns", "🧪 Testing: Easy to create test schemas and validate migrations", - "🏗️ Maintenance: Single source of truth for all schema changes" + "🏗️ Maintenance: Single source of truth for all schema changes", ] - + for benefit in benefits: print(f" {benefit}") @@ -164,13 +165,13 @@ def main(): print("=" * 60) print("This demo shows the clean separation of schema definition") print("from database implementation logic.\n") - + demo_schema_info() demo_schema_definition() demo_data_extraction() demo_database_integration() demo_benefits() - + print(f"\n🎉 Schema Demo Complete!") print(f"💡 The schema is now:") print(f" • Documented and well-structured") @@ -180,4 +181,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/sleep_phases_analysis.py b/examples/sleep_phases_analysis.py index 0affc20..23c6d68 100644 --- a/examples/sleep_phases_analysis.py +++ b/examples/sleep_phases_analysis.py @@ 
-244,12 +244,14 @@ def get_date_input(prompt: str, default_date: date = None) -> date: """Get a date input from the user with validation.""" while True: if default_date: - date_str = input(f"{prompt} (YYYY-MM-DD) [default: {default_date}]: ").strip() + date_str = input( + f"{prompt} (YYYY-MM-DD) [default: {default_date}]: " + ).strip() if not date_str: return default_date else: date_str = input(f"{prompt} (YYYY-MM-DD): ").strip() - + try: parsed_date = datetime.strptime(date_str, "%Y-%m-%d").date() return parsed_date @@ -261,21 +263,21 @@ def get_date_range() -> tuple[date, date]: """Get start and end dates from user input.""" print("\n📅 Date Range Selection") print("-" * 30) - + # Default dates default_start = date.today() - timedelta(days=30) # 30 days ago default_end = date.today() - + print("Select the date range for sleep analysis:") start_date = get_date_input("Start date", default_start) - + # Validate end date is after start date while True: end_date = get_date_input("End date", default_end) if end_date >= start_date: break print(f"❌ End date must be on or after start date ({start_date})") - + return start_date, end_date @@ -288,7 +290,7 @@ def main(): # Get date range from user start_date, end_date = get_date_range() - + total_days = (end_date - start_date).days + 1 print(f"\n📊 Analysis Settings:") print(f" Start date: {start_date}") diff --git a/src/garmy/localdb/__init__.py b/src/garmy/localdb/__init__.py index 22e00d5..acc7370 100644 --- a/src/garmy/localdb/__init__.py +++ b/src/garmy/localdb/__init__.py @@ -1,8 +1,8 @@ """Simple local database module for Garmin health metrics storage and synchronization.""" +from .config import LocalDBConfig from .db import HealthDB -from .sync import SyncManager from .models import MetricType -from .config import LocalDBConfig +from .sync import SyncManager -__all__ = ['HealthDB', 'SyncManager', 'MetricType', 'LocalDBConfig'] \ No newline at end of file +__all__ = ["HealthDB", "SyncManager", "MetricType", 
"LocalDBConfig"] diff --git a/src/garmy/localdb/__main__.py b/src/garmy/localdb/__main__.py index 7bd265e..af76cea 100644 --- a/src/garmy/localdb/__main__.py +++ b/src/garmy/localdb/__main__.py @@ -3,5 +3,5 @@ from .cli import main -if __name__ == '__main__': - exit(main()) \ No newline at end of file +if __name__ == "__main__": + exit(main()) diff --git a/src/garmy/localdb/activities_iterator.py b/src/garmy/localdb/activities_iterator.py index f5b3de8..216631a 100644 --- a/src/garmy/localdb/activities_iterator.py +++ b/src/garmy/localdb/activities_iterator.py @@ -1,32 +1,32 @@ """Activity pagination and iteration utilities.""" +import asyncio from datetime import date from typing import Any, List, Optional -import asyncio class ActivitiesIterator: """Iterator-based activities synchronization with automatic pagination.""" - + def __init__(self, api_client, sync_config, progress_reporter): """Initialize activities iterator. - + Args: api_client: Garmin API client for data access sync_config: Sync configuration with batch sizes progress_reporter: Progress reporting interface """ self.api_client = api_client - self.sync_config = sync_config + self.sync_config = sync_config self.progress = progress_reporter - + # Iterator state self.current_activity = None self.current_activity_date = None self.activities_cache = [] self.batch_offset = 0 self.has_more_data = True - + def initialize(self): """Initialize the iterator by loading first batch.""" self._load_next_batch() @@ -44,38 +44,39 @@ def reset(self): self.batch_offset = 0 self.has_more_data = True self.initialize() - + def _load_next_batch(self) -> bool: """Load next batch of activities from API.""" if not self.has_more_data: return False - + try: batch_size = self.sync_config.activities_batch_size - activities_batch = self.api_client.metrics.get('activities').list( - limit=batch_size, - start=self.batch_offset + activities_batch = self.api_client.metrics.get("activities").list( + limit=batch_size, 
start=self.batch_offset ) - + if not activities_batch or len(activities_batch) == 0: self.has_more_data = False return False - + # Append to cache and update offset self.activities_cache.extend(activities_batch) self.batch_offset += len(activities_batch) - + # Check if we got less than requested (indicates end of data) if len(activities_batch) < batch_size: self.has_more_data = False - + return True - + except Exception as e: - self.progress.warning(f"Failed to load activities batch at offset {self.batch_offset}: {e}") + self.progress.warning( + f"Failed to load activities batch at offset {self.batch_offset}: {e}" + ) self.has_more_data = False return False - + def _advance_to_next_activity(self) -> bool: """Advance to next activity, loading batches as needed.""" while True: @@ -85,54 +86,62 @@ def _advance_to_next_activity(self) -> bool: self.current_activity = None self.current_activity_date = None return False - + # Get next activity from cache if self.activities_cache: self.current_activity = self.activities_cache.pop(0) - self.current_activity_date = self._extract_activity_date(self.current_activity) + self.current_activity_date = self._extract_activity_date( + self.current_activity + ) return True else: # No more activities available self.current_activity = None self.current_activity_date = None return False - + def _extract_activity_date(self, activity) -> Optional[date]: """Extract activity date from various possible fields.""" start_time = None - + # Try different attribute names for start time - for attr in ['start_time_local', 'startTimeLocal', 'start_time', 'activityDate']: + for attr in [ + "start_time_local", + "startTimeLocal", + "start_time", + "activityDate", + ]: if hasattr(activity, attr): start_time = getattr(activity, attr) break - + if start_time: try: # Handle ISO string format if isinstance(start_time, str): from datetime import datetime - start_time = start_time.replace('Z', '+00:00') - if '.' 
in start_time and '+' in start_time: + + start_time = start_time.replace("Z", "+00:00") + if "." in start_time and "+" in start_time: dt = datetime.fromisoformat(start_time) else: dt = datetime.fromisoformat(start_time) return dt.date() - elif hasattr(start_time, 'date'): + elif hasattr(start_time, "date"): return start_time.date() except Exception: pass return None - + def get_activities_for_date(self, target_date: date) -> List[Any]: """Get all activities for a specific date.""" activities = [] - + # Ensure we have a current activity if self.current_activity is None: if not self._advance_to_next_activity(): return activities - + # Process activities while they match or are newer than target_date while self.current_activity is not None: if self.current_activity_date is None: @@ -140,22 +149,22 @@ def get_activities_for_date(self, target_date: date) -> List[Any]: if not self._advance_to_next_activity(): break continue - + if self.current_activity_date > target_date: # Activity is newer than target - skip it if not self._advance_to_next_activity(): break continue - + elif self.current_activity_date == target_date: # Activity matches target date - collect it activities.append(self.current_activity) if not self._advance_to_next_activity(): break continue - + else: # self.current_activity_date < target_date # Activity is older than target - we're done for this date break - - return activities \ No newline at end of file + + return activities diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 9d1f581..0116ee7 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -8,10 +8,10 @@ from pathlib import Path from typing import List, Optional -from .sync import SyncManager -from .progress import ProgressReporter -from .models import MetricType from .config import LocalDBConfig +from .models import MetricType +from .progress import ProgressReporter +from .sync import SyncManager def parse_date(date_str: str) -> date: @@ -19,27 +19,29 @@ def 
parse_date(date_str: str) -> date: try: return date.fromisoformat(date_str) except ValueError: - raise argparse.ArgumentTypeError(f"Invalid date format: {date_str}. Use YYYY-MM-DD") + raise argparse.ArgumentTypeError( + f"Invalid date format: {date_str}. Use YYYY-MM-DD" + ) def parse_metrics(metrics_str: str) -> List[MetricType]: """Parse comma-separated list of metrics.""" if not metrics_str: return list(MetricType) - - metric_names = [name.strip().upper() for name in metrics_str.split(',')] + + metric_names = [name.strip().upper() for name in metrics_str.split(",")] metrics = [] - + for name in metric_names: try: metric = MetricType[name] metrics.append(metric) except KeyError: - available = ', '.join([m.name for m in MetricType]) + available = ", ".join([m.name for m in MetricType]) raise argparse.ArgumentTypeError( f"Invalid metric: {name}. Available: {available}" ) - + return metrics @@ -47,17 +49,17 @@ def get_credentials() -> tuple[str, str]: """Safely get Garmin credentials from user input.""" print("Enter your Garmin Connect credentials:") email = input("Email: ").strip() - + if not email: print("Error: Email cannot be empty") sys.exit(1) - + password = getpass.getpass("Password: ") - + if not password: print("Error: Password cannot be empty") sys.exit(1) - + return email, password @@ -74,18 +76,16 @@ def cmd_sync(args) -> int: # Default: last 7 days end_date = date.today() start_date = end_date - timedelta(days=6) - + print(f"Syncing data from {start_date} to {end_date}") # Setup progress reporter - progress_reporter = ProgressReporter(use_tqdm=args.progress == 'tqdm') + progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, - config=config, - progress_reporter=progress_reporter + db_path=args.db_path, config=config, progress_reporter=progress_reporter ) # Try to initialize with saved tokens first @@ -97,29 +97,29 @@ def cmd_sync(args) -> int: 
# No valid tokens, prompt for credentials email, password = get_credentials() manager.initialize(email, password) - + # Parse metrics metrics = parse_metrics(args.metrics) if args.metrics else list(MetricType) - + print(f"Syncing metrics: {', '.join([m.name for m in metrics])}") - + # Execute sync stats = manager.sync_range( user_id=args.user_id, start_date=start_date, end_date=end_date, - metrics=metrics + metrics=metrics, ) - + # Print results print(f"\nSync completed!") print(f" Completed: {stats['completed']}") print(f" Skipped: {stats['skipped']}") print(f" Failed: {stats['failed']}") print(f" Total tasks: {stats['total_tasks']}") - - return 0 if stats['failed'] == 0 else 1 - + + return 0 if stats["failed"] == 0 else 1 + except KeyboardInterrupt: print("\nSync interrupted by user") return 130 @@ -132,59 +132,81 @@ def cmd_status(args) -> int: """Show sync status.""" try: from .db import HealthDB - + db = HealthDB(args.db_path) - + # Show overall statistics with db.get_session() as session: from .models import SyncStatus - + # Count by status status_counts = {} from sqlalchemy import func - all_statuses = session.query(SyncStatus.status, - func.count(SyncStatus.status)).group_by(SyncStatus.status).all() - + + all_statuses = ( + session.query(SyncStatus.status, func.count(SyncStatus.status)) + .group_by(SyncStatus.status) + .all() + ) + for status, count in all_statuses: status_counts[status] = count - + print("=== SYNC STATUS OVERVIEW ===") - for status in ['completed', 'pending', 'failed', 'skipped']: + for status in ["completed", "pending", "failed", "skipped"]: count = status_counts.get(status, 0) print(f"{status.capitalize()}: {count}") - + # Show failed records if any - if status_counts.get('failed', 0) > 0: + if status_counts.get("failed", 0) > 0: print(f"\n=== FAILED RECORDS ===") - failed_records = session.query(SyncStatus).filter( - SyncStatus.status == 'failed' - ).order_by(SyncStatus.sync_date.desc()).limit(10).all() - + failed_records = ( + 
session.query(SyncStatus) + .filter(SyncStatus.status == "failed") + .order_by(SyncStatus.sync_date.desc()) + .limit(10) + .all() + ) + for record in failed_records: - print(f"{record.sync_date} {record.metric_type}: {record.error_message}") - + print( + f"{record.sync_date} {record.metric_type}: {record.error_message}" + ) + # Show recent activity print(f"\n=== RECENT SYNC ACTIVITY ===") - recent_records = session.query(SyncStatus).filter( - SyncStatus.synced_at.isnot(None) - ).order_by(SyncStatus.synced_at.desc()).limit(5).all() + recent_records = ( + session.query(SyncStatus) + .filter(SyncStatus.synced_at.isnot(None)) + .order_by(SyncStatus.synced_at.desc()) + .limit(5) + .all() + ) for record in recent_records: - print(f"{record.synced_at} {record.sync_date} {record.metric_type}: {record.status}") + print( + f"{record.synced_at} {record.sync_date} {record.metric_type}: {record.status}" + ) # Show activity details backfill status - from .models import Activity from sqlalchemy import and_ - total_activities = session.query(Activity).filter( - Activity.user_id == args.user_id - ).count() - - backfilled = session.query(Activity).filter( - and_( - Activity.user_id == args.user_id, - Activity.details_synced == True # noqa: E712 + + from .models import Activity + + total_activities = ( + session.query(Activity).filter(Activity.user_id == args.user_id).count() + ) + + backfilled = ( + session.query(Activity) + .filter( + and_( + Activity.user_id == args.user_id, + Activity.details_synced == True, # noqa: E712 + ) ) - ).count() + .count() + ) pending = total_activities - backfilled @@ -196,7 +218,7 @@ def cmd_status(args) -> int: print(f"Progress: {backfilled / total_activities * 100:.1f}%") return 0 - + except Exception as e: print(f"Error: {e}") return 1 @@ -213,7 +235,9 @@ def cmd_reset(args) -> int: from .models import SyncStatus # Count failed records - failed_count = session.query(SyncStatus).filter(SyncStatus.status == 'failed').count() + failed_count = ( + 
session.query(SyncStatus).filter(SyncStatus.status == "failed").count() + ) if failed_count == 0: print("No failed records found") @@ -221,17 +245,19 @@ def cmd_reset(args) -> int: # Confirm reset if not args.force: - response = input(f"Reset {failed_count} failed records to pending? (y/N): ") - if response.lower() != 'y': + response = input( + f"Reset {failed_count} failed records to pending? (y/N): " + ) + if response.lower() != "y": print("Reset cancelled") return 0 # Reset failed to pending - updated = session.query(SyncStatus).filter(SyncStatus.status == 'failed').update({ - 'status': 'pending', - 'error_message': None, - 'synced_at': None - }) + updated = ( + session.query(SyncStatus) + .filter(SyncStatus.status == "failed") + .update({"status": "pending", "error_message": None, "synced_at": None}) + ) session.commit() print(f"Reset {updated} failed records to pending") @@ -247,14 +273,12 @@ def cmd_backfill(args) -> int: """Backfill activity details for existing activities.""" try: # Setup progress reporter - progress_reporter = ProgressReporter(use_tqdm=args.progress == 'tqdm') + progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, - config=config, - progress_reporter=progress_reporter + db_path=args.db_path, config=config, progress_reporter=progress_reporter ) # Try to initialize with saved tokens first @@ -271,8 +295,7 @@ def cmd_backfill(args) -> int: # Execute backfill stats = manager.backfill_activity_details( - user_id=args.user_id, - limit=args.limit + user_id=args.user_id, limit=args.limit ) # Print results @@ -281,7 +304,7 @@ def cmd_backfill(args) -> int: print(f" Completed: {stats['completed']}") print(f" Failed: {stats['failed']}") - return 0 if stats['failed'] == 0 else 1 + return 0 if stats["failed"] == 0 else 1 except KeyboardInterrupt: print("\nBackfill interrupted by user") @@ -295,14 +318,12 @@ def cmd_backfill_splits(args) 
-> int: """Backfill splits for cardio activities.""" try: # Setup progress reporter - progress_reporter = ProgressReporter(use_tqdm=args.progress == 'tqdm') + progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, - config=config, - progress_reporter=progress_reporter + db_path=args.db_path, config=config, progress_reporter=progress_reporter ) # Try to initialize with saved tokens first @@ -318,10 +339,7 @@ def cmd_backfill_splits(args) -> int: print(f"\nBackfilling splits for cardio activities (limit: {args.limit})") # Execute backfill - stats = manager.backfill_activity_splits( - user_id=args.user_id, - limit=args.limit - ) + stats = manager.backfill_activity_splits(user_id=args.user_id, limit=args.limit) # Print results print(f"\nSplits backfill completed!") @@ -330,7 +348,7 @@ def cmd_backfill_splits(args) -> int: print(f" Skipped: {stats['skipped']}") print(f" Failed: {stats['failed']}") - return 0 if stats['failed'] == 0 else 1 + return 0 if stats["failed"] == 0 else 1 except KeyboardInterrupt: print("\nBackfill interrupted by user") @@ -354,61 +372,101 @@ def create_parser() -> argparse.ArgumentParser: %(prog)s reset --force # Reset failed records %(prog)s backfill --limit 50 # Backfill activity details %(prog)s backfill-splits --limit 50 # Backfill splits for cardio - """ + """, ) - + # Global options - parser.add_argument('--db-path', type=Path, default=Path('health.db'), - help='Path to SQLite database file (default: health.db)') - parser.add_argument('--user-id', type=int, default=1, - help='User ID for database records (default: 1)') - + parser.add_argument( + "--db-path", + type=Path, + default=Path("health.db"), + help="Path to SQLite database file (default: health.db)", + ) + parser.add_argument( + "--user-id", + type=int, + default=1, + help="User ID for database records (default: 1)", + ) + # Subcommands - subparsers = 
parser.add_subparsers(dest='command', help='Available commands') - + subparsers = parser.add_subparsers(dest="command", help="Available commands") + # Sync command - sync_parser = subparsers.add_parser('sync', help='Synchronize data from Garmin Connect') - + sync_parser = subparsers.add_parser( + "sync", help="Synchronize data from Garmin Connect" + ) + # Date range options (mutually exclusive) date_group = sync_parser.add_mutually_exclusive_group() - date_group.add_argument('--last-days', type=int, metavar='N', - help='Sync data for last N days') - date_group.add_argument('--date-range', nargs=2, type=parse_date, - metavar=('START', 'END'), - help='Sync data between START and END dates (YYYY-MM-DD)') - + date_group.add_argument( + "--last-days", type=int, metavar="N", help="Sync data for last N days" + ) + date_group.add_argument( + "--date-range", + nargs=2, + type=parse_date, + metavar=("START", "END"), + help="Sync data between START and END dates (YYYY-MM-DD)", + ) + # Sync options - sync_parser.add_argument('--metrics', type=str, - help='Comma-separated list of metrics to sync (default: all)') - sync_parser.add_argument('--progress', choices=['tqdm', 'simple', 'silent'], - default='tqdm', - help='Progress display mode (default: tqdm)') - + sync_parser.add_argument( + "--metrics", + type=str, + help="Comma-separated list of metrics to sync (default: all)", + ) + sync_parser.add_argument( + "--progress", + choices=["tqdm", "simple", "silent"], + default="tqdm", + help="Progress display mode (default: tqdm)", + ) + # Status command - status_parser = subparsers.add_parser('status', help='Show synchronization status') - + status_parser = subparsers.add_parser("status", help="Show synchronization status") + # Reset command - reset_parser = subparsers.add_parser('reset', help='Reset failed sync records to pending') - reset_parser.add_argument('--force', action='store_true', - help='Reset without confirmation prompt') + reset_parser = subparsers.add_parser( + 
"reset", help="Reset failed sync records to pending" + ) + reset_parser.add_argument( + "--force", action="store_true", help="Reset without confirmation prompt" + ) # Backfill command - backfill_parser = subparsers.add_parser('backfill', - help='Backfill activity details for existing activities') - backfill_parser.add_argument('--limit', type=int, default=100, - help='Maximum number of activities to process (default: 100)') - backfill_parser.add_argument('--progress', choices=['tqdm', 'simple', 'silent'], - default='tqdm', - help='Progress display mode (default: tqdm)') + backfill_parser = subparsers.add_parser( + "backfill", help="Backfill activity details for existing activities" + ) + backfill_parser.add_argument( + "--limit", + type=int, + default=100, + help="Maximum number of activities to process (default: 100)", + ) + backfill_parser.add_argument( + "--progress", + choices=["tqdm", "simple", "silent"], + default="tqdm", + help="Progress display mode (default: tqdm)", + ) # Backfill splits command - backfill_splits_parser = subparsers.add_parser('backfill-splits', - help='Backfill splits/laps for cardio activities') - backfill_splits_parser.add_argument('--limit', type=int, default=100, - help='Maximum number of activities to process (default: 100)') - backfill_splits_parser.add_argument('--progress', choices=['tqdm', 'simple', 'silent'], - default='tqdm', - help='Progress display mode (default: tqdm)') + backfill_splits_parser = subparsers.add_parser( + "backfill-splits", help="Backfill splits/laps for cardio activities" + ) + backfill_splits_parser.add_argument( + "--limit", + type=int, + default=100, + help="Maximum number of activities to process (default: 100)", + ) + backfill_splits_parser.add_argument( + "--progress", + choices=["tqdm", "simple", "silent"], + default="tqdm", + help="Progress display mode (default: tqdm)", + ) return parser @@ -417,26 +475,26 @@ def main() -> int: """Main CLI entry point.""" parser = create_parser() args = 
parser.parse_args() - + if not args.command: parser.print_help() return 1 - + # Execute command - if args.command == 'sync': + if args.command == "sync": return cmd_sync(args) - elif args.command == 'status': + elif args.command == "status": return cmd_status(args) - elif args.command == 'reset': + elif args.command == "reset": return cmd_reset(args) - elif args.command == 'backfill': + elif args.command == "backfill": return cmd_backfill(args) - elif args.command == 'backfill-splits': + elif args.command == "backfill-splits": return cmd_backfill_splits(args) else: print(f"Unknown command: {args.command}") return 1 -if __name__ == '__main__': - sys.exit(main()) \ No newline at end of file +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/garmy/localdb/config.py b/src/garmy/localdb/config.py index 0f112ad..70331a1 100644 --- a/src/garmy/localdb/config.py +++ b/src/garmy/localdb/config.py @@ -1,32 +1,32 @@ """Configuration for localdb module.""" from dataclasses import dataclass, field -from typing import Optional from pathlib import Path +from typing import Optional @dataclass class SyncConfig: """Sync operation configuration.""" - + # Retry settings max_retries: int = 3 retry_exponential_base: int = 2 - + # Rate limiting rate_limit_delay: float = 0.5 - + # Progress reporting progress_reporter: str = "logging" # logging, tqdm, rich, json, silent progress_show_details: bool = True progress_log_interval: int = 50 # For logging reporter - + # Activities API (handled by iterator) activities_batch_size: int = 50 - + # Timeseries validation min_timeseries_fields: int = 2 - + # Sync range limits max_sync_days: int = 3650 # ~10 years maximum sync range @@ -34,11 +34,11 @@ class SyncConfig: @dataclass class DatabaseConfig: """Database configuration.""" - + # Connection settings timeout: float = 30.0 enable_wal_mode: bool = True - + # Timestamp conversion ms_per_second: int = 1000 seconds_per_day: int = 24 * 60 * 60 @@ -47,6 +47,6 @@ class DatabaseConfig: 
@dataclass class LocalDBConfig: """Complete localdb configuration.""" - + sync: SyncConfig = field(default_factory=SyncConfig) - database: DatabaseConfig = field(default_factory=DatabaseConfig) \ No newline at end of file + database: DatabaseConfig = field(default_factory=DatabaseConfig) diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index a76ef54..1509ceb 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -1,13 +1,23 @@ """SQLAlchemy database for health metrics storage.""" -from datetime import date +from datetime import date, datetime from pathlib import Path -from typing import List, Dict, Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict, List, Optional -from sqlalchemy import create_engine, and_, text, inspect -from sqlalchemy.orm import sessionmaker, Session +from sqlalchemy import and_, create_engine, inspect, text +from sqlalchemy.orm import Session, sessionmaker -from .models import Base, TimeSeries, Activity, DailyHealthMetric, SyncStatus, MetricType, ExerciseSet, ActivitySplit +from .models import ( + Activity, + ActivitySplit, + Base, + BodyComposition, + DailyHealthMetric, + ExerciseSet, + MetricType, + SyncStatus, + TimeSeries, +) if TYPE_CHECKING: from .config import DatabaseConfig @@ -15,29 +25,32 @@ DatabaseConfig = None -def _get_default_config() -> 'DatabaseConfig': +def _get_default_config() -> "DatabaseConfig": """Get default database configuration.""" if DatabaseConfig is None: from .config import DatabaseConfig as _DatabaseConfig + return _DatabaseConfig() return DatabaseConfig() class HealthDB: """SQLAlchemy database for health metrics.""" - - def __init__(self, - db_path: Path = Path("health.db"), - config: Optional['DatabaseConfig'] = None): + + def __init__( + self, + db_path: Path = Path("health.db"), + config: Optional["DatabaseConfig"] = None, + ): """Initialize database. - + Args: db_path: Path to SQLite database file. config: Database configuration. 
""" self.db_path = db_path self.config = config if config is not None else _get_default_config() - + self.engine = create_engine(f"sqlite:///{db_path}") self.SessionLocal = sessionmaker(bind=self.engine) @@ -51,31 +64,66 @@ def _migrate_schema(self): inspector = inspect(self.engine) # Check if activities table exists and needs migration - if 'activities' in inspector.get_table_names(): - existing_columns = {col['name'] for col in inspector.get_columns('activities')} + if "activities" in inspector.get_table_names(): + existing_columns = { + col["name"] for col in inspector.get_columns("activities") + } # New columns to add to activities table new_activity_columns = [ - ('activity_type', 'VARCHAR'), - ('distance_meters', 'FLOAT'), - ('calories', 'INTEGER'), - ('elevation_gain', 'FLOAT'), - ('elevation_loss', 'FLOAT'), - ('avg_speed', 'FLOAT'), - ('max_speed', 'FLOAT'), - ('max_heart_rate', 'INTEGER'), - ('total_sets', 'INTEGER'), - ('total_reps', 'INTEGER'), - ('total_weight_kg', 'FLOAT'), - ('details_synced', 'BOOLEAN DEFAULT 0'), - ('updated_at', 'DATETIME'), + ("activity_type", "VARCHAR"), + ("distance_meters", "FLOAT"), + ("calories", "INTEGER"), + ("elevation_gain", "FLOAT"), + ("elevation_loss", "FLOAT"), + ("avg_speed", "FLOAT"), + ("max_speed", "FLOAT"), + ("max_heart_rate", "INTEGER"), + ("total_sets", "INTEGER"), + ("total_reps", "INTEGER"), + ("total_weight_kg", "FLOAT"), + ("details_synced", "BOOLEAN DEFAULT 0"), + ("updated_at", "DATETIME"), ] with self.engine.connect() as conn: for col_name, col_type in new_activity_columns: if col_name not in existing_columns: try: - conn.execute(text(f'ALTER TABLE activities ADD COLUMN {col_name} {col_type}')) + conn.execute( + text( + f"ALTER TABLE activities ADD COLUMN {col_name} {col_type}" + ) + ) + conn.commit() + except Exception: + # Column might already exist or other issue, continue + pass + + # Migrate daily_health_metrics table for new sleep/skin temp columns + if "daily_health_metrics" in 
inspector.get_table_names(): + existing_columns = { + col["name"] for col in inspector.get_columns("daily_health_metrics") + } + + new_health_columns = [ + ("sleep_score", "INTEGER"), + ("sleep_score_qualifier", "VARCHAR"), + ("sleep_bedtime", "VARCHAR"), + ("sleep_wake_time", "VARCHAR"), + ("sleep_need_minutes", "INTEGER"), + ("skin_temp_deviation_c", "FLOAT"), + ] + + with self.engine.connect() as conn: + for col_name, col_type in new_health_columns: + if col_name not in existing_columns: + try: + conn.execute( + text( + f"ALTER TABLE daily_health_metrics ADD COLUMN {col_name} {col_type}" + ) + ) conn.commit() except Exception: # Column might already exist or other issue, continue @@ -86,24 +134,34 @@ def _migrate_schema(self): def get_session(self) -> Session: """Get database session.""" return self.SessionLocal() - + def get_schema_info(self) -> Dict[str, Any]: """Get database schema information.""" return { "tables": [table.name for table in Base.metadata.tables.values()], - "db_path": str(self.db_path) + "db_path": str(self.db_path), } - + def validate_schema(self) -> bool: """Validate database schema.""" try: - expected_tables = {'timeseries', 'activities', 'daily_health_metrics', 'sync_status', 'exercise_sets', 'activity_splits'} + expected_tables = { + "timeseries", + "activities", + "daily_health_metrics", + "sync_status", + "exercise_sets", + "activity_splits", + "body_composition", + } actual_tables = set(Base.metadata.tables.keys()) return expected_tables.issubset(actual_tables) except Exception: return False - - def store_timeseries_batch(self, user_id: int, metric_type: MetricType, data: List[tuple]): + + def store_timeseries_batch( + self, user_id: int, metric_type: MetricType, data: List[tuple] + ): """Store batch of timeseries data.""" with self.get_session() as session: for timestamp, value, metadata in data: @@ -115,283 +173,363 @@ def store_timeseries_batch(self, user_id: int, metric_type: MetricType, data: Li metric_type=metric_type.value, 
timestamp=timestamp, value=value, - meta_data=metadata + meta_data=metadata, ) session.merge(timeseries) session.commit() - + def store_activity(self, user_id: int, activity_data: Dict[str, Any]): """Store activity data including all available fields from API.""" with self.get_session() as session: activity = Activity( user_id=user_id, - activity_id=activity_data['activity_id'], - activity_date=activity_data['activity_date'], - activity_name=activity_data.get('activity_name'), - duration_seconds=activity_data.get('duration_seconds'), - avg_heart_rate=activity_data.get('avg_heart_rate'), - max_heart_rate=activity_data.get('max_heart_rate'), - training_load=activity_data.get('training_load'), - start_time=activity_data.get('start_time'), + activity_id=activity_data["activity_id"], + activity_date=activity_data["activity_date"], + activity_name=activity_data.get("activity_name"), + duration_seconds=activity_data.get("duration_seconds"), + avg_heart_rate=activity_data.get("avg_heart_rate"), + max_heart_rate=activity_data.get("max_heart_rate"), + training_load=activity_data.get("training_load"), + start_time=activity_data.get("start_time"), # Extended fields from activity list - activity_type=activity_data.get('activity_type'), - distance_meters=activity_data.get('distance_meters'), - calories=activity_data.get('calories'), - elevation_gain=activity_data.get('elevation_gain'), - elevation_loss=activity_data.get('elevation_loss'), - avg_speed=activity_data.get('avg_speed'), - max_speed=activity_data.get('max_speed'), + activity_type=activity_data.get("activity_type"), + distance_meters=activity_data.get("distance_meters"), + calories=activity_data.get("calories"), + elevation_gain=activity_data.get("elevation_gain"), + elevation_loss=activity_data.get("elevation_loss"), + avg_speed=activity_data.get("avg_speed"), + max_speed=activity_data.get("max_speed"), ) session.merge(activity) session.commit() - + def store_health_metric(self, user_id: int, metric_date: date, 
**kwargs): """Store daily health metric data.""" with self.get_session() as session: # Get existing record or create new one - metric = session.query(DailyHealthMetric).filter( - and_( - DailyHealthMetric.user_id == user_id, - DailyHealthMetric.metric_date == metric_date + metric = ( + session.query(DailyHealthMetric) + .filter( + and_( + DailyHealthMetric.user_id == user_id, + DailyHealthMetric.metric_date == metric_date, + ) ) - ).first() - + .first() + ) + if metric is None: metric = DailyHealthMetric(user_id=user_id, metric_date=metric_date) - + # Update fields from kwargs for field, value in kwargs.items(): if hasattr(metric, field): setattr(metric, field, value) - + session.merge(metric) session.commit() - - - def create_sync_status(self, user_id: int, sync_date: date, metric_type: MetricType, status: str = 'pending'): + + def create_sync_status( + self, + user_id: int, + sync_date: date, + metric_type: MetricType, + status: str = "pending", + ): """Create sync status record.""" with self.get_session() as session: sync_status = SyncStatus( user_id=user_id, sync_date=sync_date, metric_type=metric_type.value, - status=status + status=status, ) session.merge(sync_status) session.commit() - - def update_sync_status(self, user_id: int, sync_date: date, metric_type: MetricType, - status: str, error_message: Optional[str] = None): + + def update_sync_status( + self, + user_id: int, + sync_date: date, + metric_type: MetricType, + status: str, + error_message: Optional[str] = None, + ): """Update sync status record.""" with self.get_session() as session: from datetime import datetime - sync_status = session.query(SyncStatus).filter( - and_( - SyncStatus.user_id == user_id, - SyncStatus.sync_date == sync_date, - SyncStatus.metric_type == metric_type.value + + sync_status = ( + session.query(SyncStatus) + .filter( + and_( + SyncStatus.user_id == user_id, + SyncStatus.sync_date == sync_date, + SyncStatus.metric_type == metric_type.value, + ) ) - ).first() - + .first() + 
) + if sync_status: sync_status.status = status sync_status.synced_at = datetime.utcnow() if error_message: sync_status.error_message = error_message session.commit() - - def get_sync_status(self, user_id: int, sync_date: date, metric_type: MetricType) -> Optional[str]: + + def get_sync_status( + self, user_id: int, sync_date: date, metric_type: MetricType + ) -> Optional[str]: """Get sync status for specific metric.""" with self.get_session() as session: - sync_status = session.query(SyncStatus).filter( - and_( - SyncStatus.user_id == user_id, - SyncStatus.sync_date == sync_date, - SyncStatus.metric_type == metric_type.value + sync_status = ( + session.query(SyncStatus) + .filter( + and_( + SyncStatus.user_id == user_id, + SyncStatus.sync_date == sync_date, + SyncStatus.metric_type == metric_type.value, + ) ) - ).first() + .first() + ) return sync_status.status if sync_status else None - + def get_pending_metrics(self, user_id: int, sync_date: date) -> List[str]: """Get list of pending metrics for date.""" with self.get_session() as session: - pending_statuses = session.query(SyncStatus).filter( - and_( - SyncStatus.user_id == user_id, - SyncStatus.sync_date == sync_date, - SyncStatus.status == 'pending' + pending_statuses = ( + session.query(SyncStatus) + .filter( + and_( + SyncStatus.user_id == user_id, + SyncStatus.sync_date == sync_date, + SyncStatus.status == "pending", + ) ) - ).all() + .all() + ) return [status.metric_type for status in pending_statuses] - - def sync_status_exists(self, user_id: int, sync_date: date, metric_type: MetricType) -> bool: + + def sync_status_exists( + self, user_id: int, sync_date: date, metric_type: MetricType + ) -> bool: """Check if sync status record exists.""" with self.get_session() as session: - return session.query(SyncStatus).filter( - and_( - SyncStatus.user_id == user_id, - SyncStatus.sync_date == sync_date, - SyncStatus.metric_type == metric_type.value + return ( + session.query(SyncStatus) + .filter( + and_( + 
SyncStatus.user_id == user_id, + SyncStatus.sync_date == sync_date, + SyncStatus.metric_type == metric_type.value, + ) ) - ).first() is not None - - + .first() + is not None + ) + def activity_exists(self, user_id: int, activity_id: str) -> bool: """Check if activity exists.""" with self.get_session() as session: - return session.query(Activity).filter( - and_( - Activity.user_id == user_id, - Activity.activity_id == activity_id + return ( + session.query(Activity) + .filter( + and_( + Activity.user_id == user_id, Activity.activity_id == activity_id + ) ) - ).first() is not None - + .first() + is not None + ) + def health_metric_exists(self, user_id: int, metric_date: date) -> bool: """Check if health metric exists for date.""" with self.get_session() as session: - return session.query(DailyHealthMetric).filter( - and_( - DailyHealthMetric.user_id == user_id, - DailyHealthMetric.metric_date == metric_date + return ( + session.query(DailyHealthMetric) + .filter( + and_( + DailyHealthMetric.user_id == user_id, + DailyHealthMetric.metric_date == metric_date, + ) ) - ).first() is not None - - - def get_health_metrics(self, user_id: int, start_date: date, end_date: date) -> List[Dict[str, Any]]: + .first() + is not None + ) + + def get_health_metrics( + self, user_id: int, start_date: date, end_date: date + ) -> List[Dict[str, Any]]: """Query health metrics for date range.""" with self.get_session() as session: - metrics = session.query(DailyHealthMetric).filter( - and_( - DailyHealthMetric.user_id == user_id, - DailyHealthMetric.metric_date >= start_date, - DailyHealthMetric.metric_date <= end_date + metrics = ( + session.query(DailyHealthMetric) + .filter( + and_( + DailyHealthMetric.user_id == user_id, + DailyHealthMetric.metric_date >= start_date, + DailyHealthMetric.metric_date <= end_date, + ) ) - ).order_by(DailyHealthMetric.metric_date).all() - + .order_by(DailyHealthMetric.metric_date) + .all() + ) + return [self._metric_to_dict(metric) for metric in metrics] - 
- def get_activities(self, user_id: int, start_date: date, end_date: date, - activity_name: Optional[str] = None) -> List[Dict[str, Any]]: + + def get_activities( + self, + user_id: int, + start_date: date, + end_date: date, + activity_name: Optional[str] = None, + ) -> List[Dict[str, Any]]: """Query activities for date range.""" with self.get_session() as session: query = session.query(Activity).filter( and_( Activity.user_id == user_id, Activity.activity_date >= start_date, - Activity.activity_date <= end_date + Activity.activity_date <= end_date, ) ) - + if activity_name: query = query.filter(Activity.activity_name == activity_name) - + activities = query.order_by(Activity.activity_date).all() return [self._activity_to_dict(activity) for activity in activities] - - def get_timeseries(self, user_id: int, metric_type: MetricType, - start_timestamp: int, end_timestamp: int) -> List[tuple]: + + def get_timeseries( + self, + user_id: int, + metric_type: MetricType, + start_timestamp: int, + end_timestamp: int, + ) -> List[tuple]: """Query timeseries data for time range.""" with self.get_session() as session: - timeseries = session.query(TimeSeries).filter( - and_( - TimeSeries.user_id == user_id, - TimeSeries.metric_type == metric_type.value, - TimeSeries.timestamp >= start_timestamp, - TimeSeries.timestamp <= end_timestamp + timeseries = ( + session.query(TimeSeries) + .filter( + and_( + TimeSeries.user_id == user_id, + TimeSeries.metric_type == metric_type.value, + TimeSeries.timestamp >= start_timestamp, + TimeSeries.timestamp <= end_timestamp, + ) ) - ).order_by(TimeSeries.timestamp).all() - + .order_by(TimeSeries.timestamp) + .all() + ) + return [(ts.timestamp, ts.value, ts.meta_data) for ts in timeseries] - - + def _metric_to_dict(self, metric: DailyHealthMetric) -> Dict[str, Any]: """Convert DailyHealthMetric to dictionary.""" return { - 'user_id': metric.user_id, - 'metric_date': metric.metric_date, - 'total_steps': metric.total_steps, - 'step_goal': 
metric.step_goal, - 'total_distance_meters': metric.total_distance_meters, - 'total_calories': metric.total_calories, - 'active_calories': metric.active_calories, - 'bmr_calories': metric.bmr_calories, - 'resting_heart_rate': metric.resting_heart_rate, - 'max_heart_rate': metric.max_heart_rate, - 'min_heart_rate': metric.min_heart_rate, - 'average_heart_rate': metric.average_heart_rate, - 'avg_stress_level': metric.avg_stress_level, - 'max_stress_level': metric.max_stress_level, - 'body_battery_high': metric.body_battery_high, - 'body_battery_low': metric.body_battery_low, - 'sleep_duration_hours': metric.sleep_duration_hours, - 'deep_sleep_hours': metric.deep_sleep_hours, - 'light_sleep_hours': metric.light_sleep_hours, - 'rem_sleep_hours': metric.rem_sleep_hours, - 'awake_hours': metric.awake_hours, - 'deep_sleep_percentage': metric.deep_sleep_percentage, - 'light_sleep_percentage': metric.light_sleep_percentage, - 'rem_sleep_percentage': metric.rem_sleep_percentage, - 'awake_percentage': metric.awake_percentage, - 'average_spo2': metric.average_spo2, - 'average_respiration': metric.average_respiration, - 'training_readiness_score': metric.training_readiness_score, - 'training_readiness_level': metric.training_readiness_level, - 'training_readiness_feedback': metric.training_readiness_feedback, - 'hrv_weekly_avg': metric.hrv_weekly_avg, - 'hrv_last_night_avg': metric.hrv_last_night_avg, - 'hrv_status': metric.hrv_status, - 'avg_waking_respiration_value': metric.avg_waking_respiration_value, - 'avg_sleep_respiration_value': metric.avg_sleep_respiration_value, - 'lowest_respiration_value': metric.lowest_respiration_value, - 'highest_respiration_value': metric.highest_respiration_value, - 'created_at': metric.created_at, - 'updated_at': metric.updated_at + "user_id": metric.user_id, + "metric_date": metric.metric_date, + "total_steps": metric.total_steps, + "step_goal": metric.step_goal, + "total_distance_meters": metric.total_distance_meters, + "total_calories": 
metric.total_calories, + "active_calories": metric.active_calories, + "bmr_calories": metric.bmr_calories, + "resting_heart_rate": metric.resting_heart_rate, + "max_heart_rate": metric.max_heart_rate, + "min_heart_rate": metric.min_heart_rate, + "average_heart_rate": metric.average_heart_rate, + "avg_stress_level": metric.avg_stress_level, + "max_stress_level": metric.max_stress_level, + "body_battery_high": metric.body_battery_high, + "body_battery_low": metric.body_battery_low, + "sleep_duration_hours": metric.sleep_duration_hours, + "deep_sleep_hours": metric.deep_sleep_hours, + "light_sleep_hours": metric.light_sleep_hours, + "rem_sleep_hours": metric.rem_sleep_hours, + "awake_hours": metric.awake_hours, + "deep_sleep_percentage": metric.deep_sleep_percentage, + "light_sleep_percentage": metric.light_sleep_percentage, + "rem_sleep_percentage": metric.rem_sleep_percentage, + "awake_percentage": metric.awake_percentage, + "average_spo2": metric.average_spo2, + "average_respiration": metric.average_respiration, + "training_readiness_score": metric.training_readiness_score, + "training_readiness_level": metric.training_readiness_level, + "training_readiness_feedback": metric.training_readiness_feedback, + "hrv_weekly_avg": metric.hrv_weekly_avg, + "hrv_last_night_avg": metric.hrv_last_night_avg, + "hrv_status": metric.hrv_status, + "avg_waking_respiration_value": metric.avg_waking_respiration_value, + "avg_sleep_respiration_value": metric.avg_sleep_respiration_value, + "lowest_respiration_value": metric.lowest_respiration_value, + "highest_respiration_value": metric.highest_respiration_value, + # Sleep enhancements + "sleep_score": metric.sleep_score, + "sleep_score_qualifier": metric.sleep_score_qualifier, + "sleep_bedtime": metric.sleep_bedtime, + "sleep_wake_time": metric.sleep_wake_time, + "sleep_need_minutes": metric.sleep_need_minutes, + # Skin temperature + "skin_temp_deviation_c": metric.skin_temp_deviation_c, + "skin_temp_deviation_f": ( + 
metric.skin_temp_deviation_c * 1.8 + if metric.skin_temp_deviation_c + else None + ), + "created_at": metric.created_at, + "updated_at": metric.updated_at, } - + def _activity_to_dict(self, activity: Activity) -> Dict[str, Any]: """Convert Activity to dictionary.""" return { - 'user_id': activity.user_id, - 'activity_id': activity.activity_id, - 'activity_date': activity.activity_date, - 'activity_name': activity.activity_name, - 'duration_seconds': activity.duration_seconds, - 'avg_heart_rate': activity.avg_heart_rate, - 'training_load': activity.training_load, - 'start_time': activity.start_time, - 'created_at': activity.created_at, + "user_id": activity.user_id, + "activity_id": activity.activity_id, + "activity_date": activity.activity_date, + "activity_name": activity.activity_name, + "duration_seconds": activity.duration_seconds, + "avg_heart_rate": activity.avg_heart_rate, + "training_load": activity.training_load, + "start_time": activity.start_time, + "created_at": activity.created_at, # Extended activity details - 'activity_type': activity.activity_type, - 'distance_meters': activity.distance_meters, - 'calories': activity.calories, - 'elevation_gain': activity.elevation_gain, - 'elevation_loss': activity.elevation_loss, - 'avg_speed': activity.avg_speed, - 'max_speed': activity.max_speed, - 'max_heart_rate': activity.max_heart_rate, + "activity_type": activity.activity_type, + "distance_meters": activity.distance_meters, + "calories": activity.calories, + "elevation_gain": activity.elevation_gain, + "elevation_loss": activity.elevation_loss, + "avg_speed": activity.avg_speed, + "max_speed": activity.max_speed, + "max_heart_rate": activity.max_heart_rate, # Strength training summary - 'total_sets': activity.total_sets, - 'total_reps': activity.total_reps, - 'total_weight_kg': activity.total_weight_kg, - 'details_synced': activity.details_synced, - 'updated_at': activity.updated_at + "total_sets": activity.total_sets, + "total_reps": activity.total_reps, + 
"total_weight_kg": activity.total_weight_kg, + "details_synced": activity.details_synced, + "updated_at": activity.updated_at, } - def store_exercise_sets(self, user_id: int, activity_id: str, sets: List[Dict[str, Any]]): + def store_exercise_sets( + self, user_id: int, activity_id: str, sets: List[Dict[str, Any]] + ): """Store exercise sets for an activity.""" with self.get_session() as session: for set_data in sets: exercise_set = ExerciseSet( user_id=user_id, activity_id=activity_id, - set_order=set_data.get('set_order', 0), - exercise_category=set_data.get('exercise_category'), - exercise_name=set_data.get('exercise_name'), - set_type=set_data.get('set_type'), - repetition_count=set_data.get('repetition_count'), - weight_grams=set_data.get('weight_grams'), - duration_seconds=set_data.get('duration_seconds'), - start_time=set_data.get('start_time') + set_order=set_data.get("set_order", 0), + exercise_category=set_data.get("exercise_category"), + exercise_name=set_data.get("exercise_name"), + set_type=set_data.get("set_type"), + repetition_count=set_data.get("repetition_count"), + weight_grams=set_data.get("weight_grams"), + duration_seconds=set_data.get("duration_seconds"), + start_time=set_data.get("start_time"), ) session.merge(exercise_set) session.commit() @@ -399,42 +537,60 @@ def store_exercise_sets(self, user_id: int, activity_id: str, sets: List[Dict[st def get_exercise_sets(self, user_id: int, activity_id: str) -> List[Dict[str, Any]]: """Get exercise sets for an activity.""" with self.get_session() as session: - sets = session.query(ExerciseSet).filter( - and_( - ExerciseSet.user_id == user_id, - ExerciseSet.activity_id == activity_id + sets = ( + session.query(ExerciseSet) + .filter( + and_( + ExerciseSet.user_id == user_id, + ExerciseSet.activity_id == activity_id, + ) ) - ).order_by(ExerciseSet.set_order).all() + .order_by(ExerciseSet.set_order) + .all() + ) return [self._exercise_set_to_dict(s) for s in sets] - def get_all_exercise_sets(self, 
user_id: int, start_date: date, end_date: date) -> List[Dict[str, Any]]: + def get_all_exercise_sets( + self, user_id: int, start_date: date, end_date: date + ) -> List[Dict[str, Any]]: """Get all exercise sets for activities in date range.""" with self.get_session() as session: # Join with activities to filter by date - sets = session.query(ExerciseSet).join( - Activity, - and_( - ExerciseSet.user_id == Activity.user_id, - ExerciseSet.activity_id == Activity.activity_id + sets = ( + session.query(ExerciseSet) + .join( + Activity, + and_( + ExerciseSet.user_id == Activity.user_id, + ExerciseSet.activity_id == Activity.activity_id, + ), ) - ).filter( - and_( - ExerciseSet.user_id == user_id, - Activity.activity_date >= start_date, - Activity.activity_date <= end_date + .filter( + and_( + ExerciseSet.user_id == user_id, + Activity.activity_date >= start_date, + Activity.activity_date <= end_date, + ) ) - ).order_by(Activity.activity_date, ExerciseSet.set_order).all() + .order_by(Activity.activity_date, ExerciseSet.set_order) + .all() + ) return [self._exercise_set_to_dict(s) for s in sets] - def update_activity_details(self, user_id: int, activity_id: str, details: Dict[str, Any]): + def update_activity_details( + self, user_id: int, activity_id: str, details: Dict[str, Any] + ): """Update activity with detailed data.""" with self.get_session() as session: - activity = session.query(Activity).filter( - and_( - Activity.user_id == user_id, - Activity.activity_id == activity_id + activity = ( + session.query(Activity) + .filter( + and_( + Activity.user_id == user_id, Activity.activity_id == activity_id + ) ) - ).first() + .first() + ) if activity: for field, value in details.items(): @@ -443,140 +599,268 @@ def update_activity_details(self, user_id: int, activity_id: str, details: Dict[ activity.details_synced = True session.commit() - def get_activities_without_details(self, user_id: int, limit: int = 100) -> List[Dict[str, Any]]: + def get_activities_without_details( 
+ self, user_id: int, limit: int = 100 + ) -> List[Dict[str, Any]]: """Get activities that haven't had details synced yet.""" with self.get_session() as session: - activities = session.query(Activity).filter( - and_( - Activity.user_id == user_id, - Activity.details_synced == False # noqa: E712 + activities = ( + session.query(Activity) + .filter( + and_( + Activity.user_id == user_id, + Activity.details_synced == False, # noqa: E712 + ) ) - ).order_by(Activity.activity_date.desc()).limit(limit).all() + .order_by(Activity.activity_date.desc()) + .limit(limit) + .all() + ) return [self._activity_to_dict(a) for a in activities] def _exercise_set_to_dict(self, exercise_set: ExerciseSet) -> Dict[str, Any]: """Convert ExerciseSet to dictionary.""" return { - 'user_id': exercise_set.user_id, - 'activity_id': exercise_set.activity_id, - 'set_order': exercise_set.set_order, - 'exercise_category': exercise_set.exercise_category, - 'exercise_name': exercise_set.exercise_name, - 'set_type': exercise_set.set_type, - 'repetition_count': exercise_set.repetition_count, - 'weight_grams': exercise_set.weight_grams, - 'weight_kg': exercise_set.weight_grams / 1000 if exercise_set.weight_grams else None, - 'duration_seconds': exercise_set.duration_seconds, - 'start_time': exercise_set.start_time, - 'created_at': exercise_set.created_at + "user_id": exercise_set.user_id, + "activity_id": exercise_set.activity_id, + "set_order": exercise_set.set_order, + "exercise_category": exercise_set.exercise_category, + "exercise_name": exercise_set.exercise_name, + "set_type": exercise_set.set_type, + "repetition_count": exercise_set.repetition_count, + "weight_grams": exercise_set.weight_grams, + "weight_kg": ( + exercise_set.weight_grams / 1000 if exercise_set.weight_grams else None + ), + "duration_seconds": exercise_set.duration_seconds, + "start_time": exercise_set.start_time, + "created_at": exercise_set.created_at, } - def store_activity_splits(self, user_id: int, activity_id: str, splits: 
List[Dict[str, Any]]): + def store_activity_splits( + self, user_id: int, activity_id: str, splits: List[Dict[str, Any]] + ): """Store lap/split data for an activity.""" with self.get_session() as session: for split_data in splits: split = ActivitySplit( user_id=user_id, activity_id=activity_id, - lap_index=split_data.get('lap_index', 0), - start_time=split_data.get('start_time'), - duration_seconds=split_data.get('duration_seconds'), - moving_duration_seconds=split_data.get('moving_duration_seconds'), - distance_meters=split_data.get('distance_meters'), - avg_speed=split_data.get('avg_speed'), - max_speed=split_data.get('max_speed'), - avg_moving_speed=split_data.get('avg_moving_speed'), - avg_heart_rate=split_data.get('avg_heart_rate'), - max_heart_rate=split_data.get('max_heart_rate'), - elevation_gain=split_data.get('elevation_gain'), - elevation_loss=split_data.get('elevation_loss'), - max_elevation=split_data.get('max_elevation'), - min_elevation=split_data.get('min_elevation'), - avg_cadence=split_data.get('avg_cadence'), - max_cadence=split_data.get('max_cadence'), - calories=split_data.get('calories'), - start_latitude=split_data.get('start_latitude'), - start_longitude=split_data.get('start_longitude'), - end_latitude=split_data.get('end_latitude'), - end_longitude=split_data.get('end_longitude'), - intensity_type=split_data.get('intensity_type'), + lap_index=split_data.get("lap_index", 0), + start_time=split_data.get("start_time"), + duration_seconds=split_data.get("duration_seconds"), + moving_duration_seconds=split_data.get("moving_duration_seconds"), + distance_meters=split_data.get("distance_meters"), + avg_speed=split_data.get("avg_speed"), + max_speed=split_data.get("max_speed"), + avg_moving_speed=split_data.get("avg_moving_speed"), + avg_heart_rate=split_data.get("avg_heart_rate"), + max_heart_rate=split_data.get("max_heart_rate"), + elevation_gain=split_data.get("elevation_gain"), + elevation_loss=split_data.get("elevation_loss"), + 
max_elevation=split_data.get("max_elevation"), + min_elevation=split_data.get("min_elevation"), + avg_cadence=split_data.get("avg_cadence"), + max_cadence=split_data.get("max_cadence"), + calories=split_data.get("calories"), + start_latitude=split_data.get("start_latitude"), + start_longitude=split_data.get("start_longitude"), + end_latitude=split_data.get("end_latitude"), + end_longitude=split_data.get("end_longitude"), + intensity_type=split_data.get("intensity_type"), ) session.merge(split) session.commit() - def get_activity_splits(self, user_id: int, activity_id: str) -> List[Dict[str, Any]]: + def get_activity_splits( + self, user_id: int, activity_id: str + ) -> List[Dict[str, Any]]: """Get lap/split data for an activity.""" with self.get_session() as session: - splits = session.query(ActivitySplit).filter( - and_( - ActivitySplit.user_id == user_id, - ActivitySplit.activity_id == activity_id + splits = ( + session.query(ActivitySplit) + .filter( + and_( + ActivitySplit.user_id == user_id, + ActivitySplit.activity_id == activity_id, + ) ) - ).order_by(ActivitySplit.lap_index).all() + .order_by(ActivitySplit.lap_index) + .all() + ) return [self._split_to_dict(s) for s in splits] - def get_all_activity_splits(self, user_id: int, start_date: date, end_date: date) -> List[Dict[str, Any]]: + def get_all_activity_splits( + self, user_id: int, start_date: date, end_date: date + ) -> List[Dict[str, Any]]: """Get all splits for activities in date range.""" with self.get_session() as session: # Join with activities to filter by date - splits = session.query(ActivitySplit).join( - Activity, - and_( - ActivitySplit.user_id == Activity.user_id, - ActivitySplit.activity_id == Activity.activity_id + splits = ( + session.query(ActivitySplit) + .join( + Activity, + and_( + ActivitySplit.user_id == Activity.user_id, + ActivitySplit.activity_id == Activity.activity_id, + ), ) - ).filter( - and_( - ActivitySplit.user_id == user_id, - Activity.activity_date >= start_date, - 
Activity.activity_date <= end_date + .filter( + and_( + ActivitySplit.user_id == user_id, + Activity.activity_date >= start_date, + Activity.activity_date <= end_date, + ) ) - ).order_by(Activity.activity_date, ActivitySplit.lap_index).all() + .order_by(Activity.activity_date, ActivitySplit.lap_index) + .all() + ) return [self._split_to_dict(s) for s in splits] def activity_has_splits(self, user_id: int, activity_id: str) -> bool: """Check if activity already has splits stored.""" with self.get_session() as session: - return session.query(ActivitySplit).filter( - and_( - ActivitySplit.user_id == user_id, - ActivitySplit.activity_id == activity_id + return ( + session.query(ActivitySplit) + .filter( + and_( + ActivitySplit.user_id == user_id, + ActivitySplit.activity_id == activity_id, + ) ) - ).first() is not None + .first() + is not None + ) def _split_to_dict(self, split: ActivitySplit) -> Dict[str, Any]: """Convert ActivitySplit to dictionary.""" # Calculate pace in min/km if we have distance and duration pace_min_km = None - if split.distance_meters and split.duration_seconds and split.distance_meters > 0: + if ( + split.distance_meters + and split.duration_seconds + and split.distance_meters > 0 + ): pace_min_km = (split.duration_seconds / 60) / (split.distance_meters / 1000) return { - 'user_id': split.user_id, - 'activity_id': split.activity_id, - 'lap_index': split.lap_index, - 'start_time': split.start_time, - 'duration_seconds': split.duration_seconds, - 'moving_duration_seconds': split.moving_duration_seconds, - 'distance_meters': split.distance_meters, - 'distance_km': split.distance_meters / 1000 if split.distance_meters else None, - 'avg_speed': split.avg_speed, - 'max_speed': split.max_speed, - 'avg_moving_speed': split.avg_moving_speed, - 'pace_min_km': pace_min_km, - 'avg_heart_rate': split.avg_heart_rate, - 'max_heart_rate': split.max_heart_rate, - 'elevation_gain': split.elevation_gain, - 'elevation_loss': split.elevation_loss, - 'max_elevation': 
split.max_elevation, - 'min_elevation': split.min_elevation, - 'avg_cadence': split.avg_cadence, - 'max_cadence': split.max_cadence, - 'calories': split.calories, - 'start_latitude': split.start_latitude, - 'start_longitude': split.start_longitude, - 'end_latitude': split.end_latitude, - 'end_longitude': split.end_longitude, - 'intensity_type': split.intensity_type, - 'created_at': split.created_at - } \ No newline at end of file + "user_id": split.user_id, + "activity_id": split.activity_id, + "lap_index": split.lap_index, + "start_time": split.start_time, + "duration_seconds": split.duration_seconds, + "moving_duration_seconds": split.moving_duration_seconds, + "distance_meters": split.distance_meters, + "distance_km": ( + split.distance_meters / 1000 if split.distance_meters else None + ), + "avg_speed": split.avg_speed, + "max_speed": split.max_speed, + "avg_moving_speed": split.avg_moving_speed, + "pace_min_km": pace_min_km, + "avg_heart_rate": split.avg_heart_rate, + "max_heart_rate": split.max_heart_rate, + "elevation_gain": split.elevation_gain, + "elevation_loss": split.elevation_loss, + "max_elevation": split.max_elevation, + "min_elevation": split.min_elevation, + "avg_cadence": split.avg_cadence, + "max_cadence": split.max_cadence, + "calories": split.calories, + "start_latitude": split.start_latitude, + "start_longitude": split.start_longitude, + "end_latitude": split.end_latitude, + "end_longitude": split.end_longitude, + "intensity_type": split.intensity_type, + "created_at": split.created_at, + } + + def store_body_composition(self, user_id: int, entry: Dict[str, Any]): + """Store body composition measurement.""" + # Convert measurement_date string to date object if needed + measurement_date = entry["measurement_date"] + if isinstance(measurement_date, str): + measurement_date = date.fromisoformat(measurement_date) + + with self.get_session() as session: + composition = BodyComposition( + user_id=user_id, + sample_pk=entry["sample_pk"], + 
measurement_date=measurement_date, + timestamp_gmt=( + datetime.fromtimestamp(entry["timestamp_gmt"] / 1000) + if entry.get("timestamp_gmt") + else None + ), + weight_grams=entry.get("weight_grams"), + bmi=entry.get("bmi"), + body_fat_percentage=entry.get("body_fat_percentage"), + body_water_percentage=entry.get("body_water_percentage"), + bone_mass_grams=entry.get("bone_mass_grams"), + muscle_mass_grams=entry.get("muscle_mass_grams"), + visceral_fat=entry.get("visceral_fat"), + metabolic_age=entry.get("metabolic_age"), + physique_rating=entry.get("physique_rating"), + source_type=entry.get("source_type"), + ) + session.merge(composition) + session.commit() + + def get_body_composition( + self, user_id: int, start_date: date, end_date: date + ) -> List[Dict[str, Any]]: + """Get body composition measurements for date range.""" + with self.get_session() as session: + measurements = ( + session.query(BodyComposition) + .filter( + and_( + BodyComposition.user_id == user_id, + BodyComposition.measurement_date >= start_date, + BodyComposition.measurement_date <= end_date, + ) + ) + .order_by(BodyComposition.measurement_date) + .all() + ) + return [self._body_composition_to_dict(m) for m in measurements] + + def body_composition_exists(self, user_id: int, sample_pk: str) -> bool: + """Check if body composition entry exists.""" + with self.get_session() as session: + return ( + session.query(BodyComposition) + .filter( + and_( + BodyComposition.user_id == user_id, + BodyComposition.sample_pk == sample_pk, + ) + ) + .first() + is not None + ) + + def _body_composition_to_dict(self, bc: BodyComposition) -> Dict[str, Any]: + """Convert BodyComposition to dictionary.""" + return { + "user_id": bc.user_id, + "sample_pk": bc.sample_pk, + "measurement_date": bc.measurement_date, + "timestamp_gmt": bc.timestamp_gmt, + "weight_grams": bc.weight_grams, + "weight_kg": bc.weight_grams / 1000 if bc.weight_grams else None, + "bmi": bc.bmi, + "body_fat_percentage": 
bc.body_fat_percentage, + "body_water_percentage": bc.body_water_percentage, + "bone_mass_grams": bc.bone_mass_grams, + "bone_mass_kg": bc.bone_mass_grams / 1000 if bc.bone_mass_grams else None, + "muscle_mass_grams": bc.muscle_mass_grams, + "muscle_mass_kg": ( + bc.muscle_mass_grams / 1000 if bc.muscle_mass_grams else None + ), + "visceral_fat": bc.visceral_fat, + "metabolic_age": bc.metabolic_age, + "physique_rating": bc.physique_rating, + "source_type": bc.source_type, + "created_at": bc.created_at, + } diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 26f9848..4de7a97 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -1,14 +1,17 @@ """Data extraction utilities for converting API responses to database format.""" from datetime import date -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union + from .models import MetricType class DataExtractor: """Extracts and normalizes data from API responses for database storage.""" - - def extract_metric_data(self, data: Any, metric_type: MetricType) -> Optional[Dict]: + + def extract_metric_data( + self, data: Any, metric_type: MetricType + ) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]: """Extract data based on metric type.""" if metric_type == MetricType.DAILY_SUMMARY: return self._extract_daily_summary_data(data) @@ -32,157 +35,233 @@ def extract_metric_data(self, data: Any, metric_type: MetricType) -> Optional[Di return self._extract_stress_summary(data) elif metric_type == MetricType.BODY_BATTERY: return self._extract_body_battery_summary(data) + elif metric_type == MetricType.BODY_COMPOSITION: + return self._extract_body_composition_data(data) else: return None - + def _extract_daily_summary_data(self, data: Any) -> Dict[str, Any]: """Extract daily summary data.""" return { # Steps and movement - 'total_steps': getattr(data, 'total_steps', None), - 'step_goal': getattr(data, 
'daily_step_goal', None), # Correct attribute name! - 'total_distance_meters': getattr(data, 'total_distance_meters', None), - + "total_steps": getattr(data, "total_steps", None), + "step_goal": getattr( + data, "daily_step_goal", None + ), # Correct attribute name! + "total_distance_meters": getattr(data, "total_distance_meters", None), # Calories - 'total_calories': getattr(data, 'total_kilocalories', None), - 'active_calories': getattr(data, 'active_kilocalories', None), - 'bmr_calories': getattr(data, 'bmr_kilocalories', None), - + "total_calories": getattr(data, "total_kilocalories", None), + "active_calories": getattr(data, "active_kilocalories", None), + "bmr_calories": getattr(data, "bmr_kilocalories", None), # Heart rate - 'resting_heart_rate': getattr(data, 'resting_heart_rate', None), - 'max_heart_rate': getattr(data, 'max_heart_rate', None), - 'min_heart_rate': getattr(data, 'min_heart_rate', None), - 'average_heart_rate': getattr(data, 'average_heart_rate', None), - + "resting_heart_rate": getattr(data, "resting_heart_rate", None), + "max_heart_rate": getattr(data, "max_heart_rate", None), + "min_heart_rate": getattr(data, "min_heart_rate", None), + "average_heart_rate": getattr(data, "average_heart_rate", None), # Stress and recovery - 'avg_stress_level': getattr(data, 'avg_stress_level', None) or getattr(data, 'stress_avg', None), - 'max_stress_level': getattr(data, 'max_stress_level', None) or getattr(data, 'stress_max', None), - 'body_battery_high': getattr(data, 'body_battery_highest_value', None), - 'body_battery_low': getattr(data, 'body_battery_lowest_value', None), - + "avg_stress_level": getattr(data, "avg_stress_level", None) + or getattr(data, "stress_avg", None), + "max_stress_level": getattr(data, "max_stress_level", None) + or getattr(data, "stress_max", None), + "body_battery_high": getattr(data, "body_battery_highest_value", None), + "body_battery_low": getattr(data, "body_battery_lowest_value", None), # Additional metrics that might 
be in daily summary - 'average_spo2': getattr(data, 'average_sp_o2_value', None), - 'average_respiration': getattr(data, 'average_respiration_value', None) + "average_spo2": getattr(data, "average_sp_o2_value", None), + "average_respiration": getattr(data, "average_respiration_value", None), } - + def _extract_sleep_data(self, data: Any) -> Dict[str, Any]: """Extract sleep data from Sleep object.""" + from datetime import datetime + result = { # Use the built-in properties from Sleep class - 'sleep_duration_hours': getattr(data, 'sleep_duration_hours', None), - 'deep_sleep_percentage': getattr(data, 'deep_sleep_percentage', None), - 'light_sleep_percentage': getattr(data, 'light_sleep_percentage', None), - 'rem_sleep_percentage': getattr(data, 'rem_sleep_percentage', None), - 'awake_percentage': getattr(data, 'awake_percentage', None), - 'deep_sleep_hours': None, - 'light_sleep_hours': None, - 'rem_sleep_hours': None, - 'awake_hours': None, - 'average_spo2': None, - 'average_respiration': None, + "sleep_duration_hours": getattr(data, "sleep_duration_hours", None), + "deep_sleep_percentage": getattr(data, "deep_sleep_percentage", None), + "light_sleep_percentage": getattr(data, "light_sleep_percentage", None), + "rem_sleep_percentage": getattr(data, "rem_sleep_percentage", None), + "awake_percentage": getattr(data, "awake_percentage", None), + "deep_sleep_hours": None, + "light_sleep_hours": None, + "rem_sleep_hours": None, + "awake_hours": None, + "average_spo2": None, + "average_respiration": None, + # NEW: Sleep score + "sleep_score": None, + "sleep_score_qualifier": None, + # NEW: Sleep timing + "sleep_bedtime": None, + "sleep_wake_time": None, + "sleep_need_minutes": None, + # NEW: Skin temperature + "skin_temp_deviation_c": None, } # Extract from sleep_summary if available - if hasattr(data, 'sleep_summary') and data.sleep_summary: + if hasattr(data, "sleep_summary") and data.sleep_summary: summary = data.sleep_summary - deep = getattr(summary, 
'deep_sleep_seconds', None) - light = getattr(summary, 'light_sleep_seconds', None) - rem = getattr(summary, 'rem_sleep_seconds', None) - awake = getattr(summary, 'awake_sleep_seconds', None) + deep = getattr(summary, "deep_sleep_seconds", None) + light = getattr(summary, "light_sleep_seconds", None) + rem = getattr(summary, "rem_sleep_seconds", None) + awake = getattr(summary, "awake_sleep_seconds", None) if deep and deep > 0: - result['deep_sleep_hours'] = deep / 3600 + result["deep_sleep_hours"] = deep / 3600 if light and light > 0: - result['light_sleep_hours'] = light / 3600 + result["light_sleep_hours"] = light / 3600 if rem and rem > 0: - result['rem_sleep_hours'] = rem / 3600 + result["rem_sleep_hours"] = rem / 3600 if awake and awake > 0: - result['awake_hours'] = awake / 3600 - - result['average_spo2'] = getattr(summary, 'average_sp_o2_value', None) - result['average_respiration'] = getattr(summary, 'average_respiration_value', None) + result["awake_hours"] = awake / 3600 + + result["average_spo2"] = getattr(summary, "average_sp_o2_value", None) + result["average_respiration"] = getattr( + summary, "average_respiration_value", None + ) + + # NEW: Extract sleep scores from nested dict + sleep_scores = getattr(summary, "sleep_scores", None) + if sleep_scores and isinstance(sleep_scores, dict): + overall = sleep_scores.get("overall", {}) + if isinstance(overall, dict): + result["sleep_score"] = overall.get("value") + result["sleep_score_qualifier"] = overall.get("qualifier_key") + + # NEW: Extract sleep need + sleep_need = getattr(summary, "sleep_need", None) + if sleep_need and isinstance(sleep_need, dict): + result["sleep_need_minutes"] = sleep_need.get("actual") + + # NEW: Convert timestamps to ISO strings + sleep_start = getattr(summary, "sleep_start_timestamp_local", None) + sleep_end = getattr(summary, "sleep_end_timestamp_local", None) + + if sleep_start: + try: + result["sleep_bedtime"] = datetime.fromtimestamp( + sleep_start / 1000 + ).isoformat() + 
except (ValueError, OSError): + pass + + if sleep_end: + try: + result["sleep_wake_time"] = datetime.fromtimestamp( + sleep_end / 1000 + ).isoformat() + except (ValueError, OSError): + pass + + # NEW: Extract skin temp from top-level Sleep object (not summary) + skin_temp = getattr(data, "skin_temp_deviation_c", None) + if skin_temp is not None: + result["skin_temp_deviation_c"] = skin_temp return result - + def _extract_heart_rate_summary(self, data: Any) -> Dict[str, Any]: """Extract heart rate summary data.""" # Heart rate data is in heart_rate_summary nested object - summary = getattr(data, 'heart_rate_summary', data) - + summary = getattr(data, "heart_rate_summary", data) + return { - 'resting_heart_rate': getattr(summary, 'resting_heart_rate', None), - 'max_heart_rate': getattr(summary, 'max_heart_rate', None), - 'min_heart_rate': getattr(summary, 'min_heart_rate', None), - 'average_heart_rate': getattr(data, 'average_heart_rate', None) # This is on main object + "resting_heart_rate": getattr(summary, "resting_heart_rate", None), + "max_heart_rate": getattr(summary, "max_heart_rate", None), + "min_heart_rate": getattr(summary, "min_heart_rate", None), + "average_heart_rate": getattr( + data, "average_heart_rate", None + ), # This is on main object } - + def _extract_stress_summary(self, data: Any) -> Dict[str, Any]: """Extract stress summary data.""" return { - 'avg_stress_level': getattr(data, 'avg_stress_level', None) or getattr(data, 'stress_avg', None), - 'max_stress_level': getattr(data, 'max_stress_level', None) or getattr(data, 'stress_max', None) + "avg_stress_level": getattr(data, "avg_stress_level", None) + or getattr(data, "stress_avg", None), + "max_stress_level": getattr(data, "max_stress_level", None) + or getattr(data, "stress_max", None), } - + def _extract_body_battery_summary(self, data: Any) -> Dict[str, Any]: """Extract body battery summary data.""" return { - 'body_battery_high': getattr(data, 'body_battery_highest_value', None) or 
getattr(data, 'highest_value', None), - 'body_battery_low': getattr(data, 'body_battery_lowest_value', None) or getattr(data, 'lowest_value', None) + "body_battery_high": getattr(data, "body_battery_highest_value", None) + or getattr(data, "highest_value", None), + "body_battery_low": getattr(data, "body_battery_lowest_value", None) + or getattr(data, "lowest_value", None), } - + def _extract_training_readiness_data(self, data: Any) -> Dict[str, Any]: """Extract training readiness nested data.""" return { - 'score': getattr(data, 'score', None), - 'level': getattr(data, 'level', None), - 'feedback': getattr(data, 'feedback_short', None) + "score": getattr(data, "score", None), + "level": getattr(data, "level", None), + "feedback": getattr(data, "feedback_short", None), } - + def _extract_hrv_data(self, data: Any) -> Dict[str, Any]: """Extract HRV using nested summary.""" - hrv_summary = getattr(data, 'hrv_summary', None) + hrv_summary = getattr(data, "hrv_summary", None) if hrv_summary: return { - 'weekly_avg': getattr(hrv_summary, 'weekly_avg', None), - 'last_night_avg': getattr(hrv_summary, 'last_night_avg', None), - 'status': getattr(hrv_summary, 'status', None) + "weekly_avg": getattr(hrv_summary, "weekly_avg", None), + "last_night_avg": getattr(hrv_summary, "last_night_avg", None), + "status": getattr(hrv_summary, "status", None), } return {} - + def _extract_respiration_summary(self, data: Any) -> Dict[str, Any]: """Extract respiration summary - unique respiratory metrics.""" # Try different possible locations for respiration data - summary = getattr(data, 'respiration_summary', None) + summary = getattr(data, "respiration_summary", None) if summary: return { - 'average_respiration': getattr(summary, 'average_respiration_value', None), - 'avg_waking_respiration_value': getattr(summary, 'avg_waking_respiration_value', None), - 'avg_sleep_respiration_value': getattr(summary, 'avg_sleep_respiration_value', None), - 'lowest_respiration_value': getattr(summary, 
'lowest_respiration_value', None), - 'highest_respiration_value': getattr(summary, 'highest_respiration_value', None) + "average_respiration": getattr( + summary, "average_respiration_value", None + ), + "avg_waking_respiration_value": getattr( + summary, "avg_waking_respiration_value", None + ), + "avg_sleep_respiration_value": getattr( + summary, "avg_sleep_respiration_value", None + ), + "lowest_respiration_value": getattr( + summary, "lowest_respiration_value", None + ), + "highest_respiration_value": getattr( + summary, "highest_respiration_value", None + ), } - + # Also try direct attributes result = { - 'average_respiration': getattr(data, 'average_respiration_value', None), - 'avg_waking_respiration_value': getattr(data, 'avg_waking_respiration_value', None), - 'avg_sleep_respiration_value': getattr(data, 'avg_sleep_respiration_value', None), - 'lowest_respiration_value': getattr(data, 'lowest_respiration_value', None), - 'highest_respiration_value': getattr(data, 'highest_respiration_value', None) + "average_respiration": getattr(data, "average_respiration_value", None), + "avg_waking_respiration_value": getattr( + data, "avg_waking_respiration_value", None + ), + "avg_sleep_respiration_value": getattr( + data, "avg_sleep_respiration_value", None + ), + "lowest_respiration_value": getattr(data, "lowest_respiration_value", None), + "highest_respiration_value": getattr( + data, "highest_respiration_value", None + ), } - + # Return only if we have any data if any(v is not None for v in result.values()): return result - + return {} - + def _extract_activity_data(self, data: Any) -> Dict[str, Any]: """Extract activity data from both parsed and raw formats. Extracts comprehensive activity data from the activity list API response, which includes all the fields we need without requiring separate API calls. 
""" + # Handle both object attributes and dict keys def get_value(obj, *keys): for key in keys: @@ -202,91 +281,107 @@ def get_nested_value(obj, outer_key, inner_key): return getattr(outer, inner_key, None) return None - activity_id = get_value(data, 'activity_id', 'activityId') + activity_id = get_value(data, "activity_id", "activityId") if activity_id: # Extract activity type from nested activityType dict # Parsed ActivitySummary uses 'type_key', raw dict uses 'typeKey' - activity_type = get_nested_value(data, 'activity_type', 'type_key') + activity_type = get_nested_value(data, "activity_type", "type_key") if not activity_type: - activity_type = get_nested_value(data, 'activity_type', 'typeKey') + activity_type = get_nested_value(data, "activity_type", "typeKey") if not activity_type: - activity_type = get_nested_value(data, 'activityType', 'typeKey') + activity_type = get_nested_value(data, "activityType", "typeKey") return { - 'activity_id': activity_id, - 'activity_name': get_value(data, 'activity_name', 'activityName'), - 'duration_seconds': get_value(data, 'duration', 'movingDuration', 'elapsedDuration'), + "activity_id": activity_id, + "activity_name": get_value(data, "activity_name", "activityName"), + "duration_seconds": get_value( + data, "duration", "movingDuration", "elapsedDuration" + ), # Heart rate - parsed uses average_hr/max_hr, raw uses averageHR/maxHR - 'avg_heart_rate': get_value(data, 'average_hr', 'averageHR', 'avgHR'), - 'max_heart_rate': get_value(data, 'max_hr', 'maxHR'), - 'training_load': get_value(data, 'activity_training_load', 'activityTrainingLoad', 'trainingLoad'), - 'start_time': get_value(data, 'start_time_local', 'startTimeLocal', 'start_time'), + "avg_heart_rate": get_value(data, "average_hr", "averageHR", "avgHR"), + "max_heart_rate": get_value(data, "max_hr", "maxHR"), + "training_load": get_value( + data, + "activity_training_load", + "activityTrainingLoad", + "trainingLoad", + ), + "start_time": get_value( + data, 
"start_time_local", "startTimeLocal", "start_time" + ), # Activity type extracted above - 'activity_type': activity_type, + "activity_type": activity_type, # These may not be in parsed ActivitySummary, but try anyway - 'distance_meters': get_value(data, 'distance', 'distance_meters'), - 'calories': get_value(data, 'calories'), - 'elevation_gain': get_value(data, 'elevation_gain', 'elevationGain'), - 'elevation_loss': get_value(data, 'elevation_loss', 'elevationLoss'), - 'avg_speed': get_value(data, 'average_speed', 'averageSpeed'), - 'max_speed': get_value(data, 'max_speed', 'maxSpeed'), + "distance_meters": get_value(data, "distance", "distance_meters"), + "calories": get_value(data, "calories"), + "elevation_gain": get_value(data, "elevation_gain", "elevationGain"), + "elevation_loss": get_value(data, "elevation_loss", "elevationLoss"), + "avg_speed": get_value(data, "average_speed", "averageSpeed"), + "max_speed": get_value(data, "max_speed", "maxSpeed"), } return {} - - def extract_timeseries_data(self, data: Any, metric_type: MetricType) -> List[Tuple]: + + def extract_timeseries_data( + self, data: Any, metric_type: MetricType + ) -> List[Tuple]: """Extract timeseries data points from Garmy metrics.""" timeseries_data = [] - + if metric_type == MetricType.BODY_BATTERY: - if hasattr(data, 'body_battery_readings') and data.body_battery_readings: + if hasattr(data, "body_battery_readings") and data.body_battery_readings: for reading in data.body_battery_readings: if reading.level is None: continue metadata = { - 'status': getattr(reading, 'status', None), - 'version': getattr(reading, 'version', None) + "status": getattr(reading, "status", None), + "version": getattr(reading, "version", None), } timeseries_data.append((reading.timestamp, reading.level, metadata)) - + elif metric_type == MetricType.STRESS: - if hasattr(data, 'stress_readings') and data.stress_readings: + if hasattr(data, "stress_readings") and data.stress_readings: for reading in 
data.stress_readings: if reading.stress_level is None: continue metadata = {} - if hasattr(reading, 'stress_category'): - metadata['stress_category'] = reading.stress_category - timeseries_data.append((reading.timestamp, reading.stress_level, metadata)) - + if hasattr(reading, "stress_category"): + metadata["stress_category"] = reading.stress_category + timeseries_data.append( + (reading.timestamp, reading.stress_level, metadata) + ) + elif metric_type == MetricType.HEART_RATE: - if hasattr(data, 'heart_rate_values_array') and data.heart_rate_values_array: + if ( + hasattr(data, "heart_rate_values_array") + and data.heart_rate_values_array + ): for reading in data.heart_rate_values_array: if isinstance(reading, (list, tuple)) and len(reading) >= 2: timestamp, heart_rate = reading[0], reading[1] if heart_rate is not None: timeseries_data.append((timestamp, heart_rate, {})) - + elif metric_type == MetricType.RESPIRATION: # Respiration might have different format - check if it has readings - if hasattr(data, 'respiration_readings') and data.respiration_readings: + if hasattr(data, "respiration_readings") and data.respiration_readings: for reading in data.respiration_readings: timeseries_data.append((reading.timestamp, reading.value, {})) - + return timeseries_data - + def _extract_steps_data(self, data: Any) -> Dict[str, Any]: """Extract steps data.""" return { - 'total_steps': getattr(data, 'total_steps', None), - 'step_goal': getattr(data, 'step_goal', None) + "total_steps": getattr(data, "total_steps", None), + "step_goal": getattr(data, "step_goal", None), } - + def _extract_calories_data(self, data: Any) -> Dict[str, Any]: """Extract calories data.""" return { - 'total_calories': getattr(data, 'total_kilocalories', None), - 'active_calories': getattr(data, 'active_kilocalories', None), - 'bmr_calories': getattr(data, 'bmr_kilocalories', None) + "total_calories": getattr(data, "total_kilocalories", None), + "active_calories": getattr(data, "active_kilocalories", 
None), + "bmr_calories": getattr(data, "bmr_kilocalories", None), } def extract_activity_details(self, data: Dict) -> Dict[str, Any]: @@ -301,20 +396,24 @@ def extract_activity_details(self, data: Dict) -> Dict[str, Any]: if not data: return {} - activity_type_info = data.get('activityType', {}) + activity_type_info = data.get("activityType", {}) return { - 'activity_type': activity_type_info.get('typeKey') if activity_type_info else None, - 'distance_meters': data.get('distance'), - 'calories': data.get('calories'), - 'elevation_gain': data.get('elevationGain'), - 'elevation_loss': data.get('elevationLoss'), - 'avg_speed': data.get('avgSpeed'), - 'max_speed': data.get('maxSpeed'), - 'max_heart_rate': data.get('maxHR'), + "activity_type": ( + activity_type_info.get("typeKey") if activity_type_info else None + ), + "distance_meters": data.get("distance"), + "calories": data.get("calories"), + "elevation_gain": data.get("elevationGain"), + "elevation_loss": data.get("elevationLoss"), + "avg_speed": data.get("avgSpeed"), + "max_speed": data.get("maxSpeed"), + "max_heart_rate": data.get("maxHR"), } - def extract_exercise_sets(self, data: Dict, activity_id: str) -> List[Dict[str, Any]]: + def extract_exercise_sets( + self, data: Dict, activity_id: str + ) -> List[Dict[str, Any]]: """Extract exercise sets from exerciseSets API response. 
Args: @@ -328,30 +427,34 @@ def extract_exercise_sets(self, data: Dict, activity_id: str) -> List[Dict[str, return [] sets = [] - exercise_sets = data.get('exerciseSets', []) + exercise_sets = data.get("exerciseSets", []) for i, set_data in enumerate(exercise_sets): - exercises = set_data.get('exercises', []) + exercises = set_data.get("exercises", []) # Get most probable exercise category from the exercises list category = None exercise_name = None if exercises: # Sort by probability and get the best match - best_match = max(exercises, key=lambda x: x.get('probability', 0)) - category = best_match.get('category') - exercise_name = best_match.get('name') - - sets.append({ - 'set_order': i, - 'exercise_category': category, - 'exercise_name': exercise_name, - 'set_type': set_data.get('setType'), - 'repetition_count': set_data.get('repetitionCount'), - 'weight_grams': set_data.get('weight'), # API returns weight in milligrams - 'duration_seconds': set_data.get('duration'), - 'start_time': set_data.get('startTime') - }) + best_match = max(exercises, key=lambda x: x.get("probability", 0)) + category = best_match.get("category") + exercise_name = best_match.get("name") + + sets.append( + { + "set_order": i, + "exercise_category": category, + "exercise_name": exercise_name, + "set_type": set_data.get("setType"), + "repetition_count": set_data.get("repetitionCount"), + "weight_grams": set_data.get( + "weight" + ), # API returns weight in milligrams + "duration_seconds": set_data.get("duration"), + "start_time": set_data.get("startTime"), + } + ) return sets @@ -364,24 +467,26 @@ def calculate_strength_summary(self, sets: List[Dict[str, Any]]) -> Dict[str, An Returns: Dict with total_sets, total_reps, total_weight_kg """ - active_sets = [s for s in sets if s.get('set_type') == 'ACTIVE'] + active_sets = [s for s in sets if s.get("set_type") == "ACTIVE"] - total_reps = sum(s.get('repetition_count', 0) or 0 for s in active_sets) + total_reps = sum(s.get("repetition_count", 0) 
or 0 for s in active_sets) # Calculate total volume (sum of weight * reps for each set) total_volume_grams = 0 for s in active_sets: - weight = s.get('weight_grams', 0) or 0 - reps = s.get('repetition_count', 0) or 0 + weight = s.get("weight_grams", 0) or 0 + reps = s.get("repetition_count", 0) or 0 total_volume_grams += weight * reps return { - 'total_sets': len(active_sets), - 'total_reps': total_reps, - 'total_weight_kg': total_volume_grams / 1000 if total_volume_grams else 0 + "total_sets": len(active_sets), + "total_reps": total_reps, + "total_weight_kg": total_volume_grams / 1000 if total_volume_grams else 0, } - def extract_activity_splits(self, data: Dict, activity_id: str) -> List[Dict[str, Any]]: + def extract_activity_splits( + self, data: Dict, activity_id: str + ) -> List[Dict[str, Any]]: """Extract lap/split data from splits API response. Args: @@ -395,33 +500,39 @@ def extract_activity_splits(self, data: Dict, activity_id: str) -> List[Dict[str return [] splits = [] - lap_dtos = data.get('lapDTOs', []) + lap_dtos = data.get("lapDTOs", []) for lap in lap_dtos: - splits.append({ - 'lap_index': lap.get('lapIndex', 0), - 'start_time': lap.get('startTimeGMT'), - 'duration_seconds': lap.get('duration'), - 'moving_duration_seconds': lap.get('movingDuration'), - 'distance_meters': lap.get('distance'), - 'avg_speed': lap.get('averageSpeed'), - 'max_speed': lap.get('maxSpeed'), - 'avg_moving_speed': lap.get('averageMovingSpeed'), - 'avg_heart_rate': int(lap.get('averageHR')) if lap.get('averageHR') else None, - 'max_heart_rate': int(lap.get('maxHR')) if lap.get('maxHR') else None, - 'elevation_gain': lap.get('elevationGain'), - 'elevation_loss': lap.get('elevationLoss'), - 'max_elevation': lap.get('maxElevation'), - 'min_elevation': lap.get('minElevation'), - 'avg_cadence': lap.get('averageRunCadence'), - 'max_cadence': lap.get('maxRunCadence'), - 'calories': lap.get('calories'), - 'start_latitude': lap.get('startLatitude'), - 'start_longitude': 
lap.get('startLongitude'), - 'end_latitude': lap.get('endLatitude'), - 'end_longitude': lap.get('endLongitude'), - 'intensity_type': lap.get('intensityType'), - }) + splits.append( + { + "lap_index": lap.get("lapIndex", 0), + "start_time": lap.get("startTimeGMT"), + "duration_seconds": lap.get("duration"), + "moving_duration_seconds": lap.get("movingDuration"), + "distance_meters": lap.get("distance"), + "avg_speed": lap.get("averageSpeed"), + "max_speed": lap.get("maxSpeed"), + "avg_moving_speed": lap.get("averageMovingSpeed"), + "avg_heart_rate": ( + int(lap.get("averageHR")) if lap.get("averageHR") else None + ), + "max_heart_rate": ( + int(lap.get("maxHR")) if lap.get("maxHR") else None + ), + "elevation_gain": lap.get("elevationGain"), + "elevation_loss": lap.get("elevationLoss"), + "max_elevation": lap.get("maxElevation"), + "min_elevation": lap.get("minElevation"), + "avg_cadence": lap.get("averageRunCadence"), + "max_cadence": lap.get("maxRunCadence"), + "calories": lap.get("calories"), + "start_latitude": lap.get("startLatitude"), + "start_longitude": lap.get("startLongitude"), + "end_latitude": lap.get("endLatitude"), + "end_longitude": lap.get("endLongitude"), + "intensity_type": lap.get("intensityType"), + } + ) return splits @@ -434,15 +545,17 @@ def calculate_splits_summary(self, splits: List[Dict[str, Any]]) -> Dict[str, An Returns: Dict with total_laps and aggregated metrics """ - active_splits = [s for s in splits if s.get('intensity_type') == 'ACTIVE'] + active_splits = [s for s in splits if s.get("intensity_type") == "ACTIVE"] if not active_splits: - return {'total_laps': len(splits)} + return {"total_laps": len(splits)} - total_distance = sum(s.get('distance_meters', 0) or 0 for s in active_splits) - total_duration = sum(s.get('duration_seconds', 0) or 0 for s in active_splits) - total_elevation_gain = sum(s.get('elevation_gain', 0) or 0 for s in active_splits) - total_calories = sum(s.get('calories', 0) or 0 for s in active_splits) + 
total_distance = sum(s.get("distance_meters", 0) or 0 for s in active_splits) + total_duration = sum(s.get("duration_seconds", 0) or 0 for s in active_splits) + total_elevation_gain = sum( + s.get("elevation_gain", 0) or 0 for s in active_splits + ) + total_calories = sum(s.get("calories", 0) or 0 for s in active_splits) # Calculate average pace (min/km) if we have distance avg_pace_min_km = None @@ -451,10 +564,50 @@ def calculate_splits_summary(self, splits: List[Dict[str, Any]]) -> Dict[str, An avg_pace_min_km = (total_duration / 60) / (total_distance / 1000) return { - 'total_laps': len(active_splits), - 'total_distance_meters': total_distance, - 'total_duration_seconds': total_duration, - 'total_elevation_gain': total_elevation_gain, - 'total_calories': total_calories, - 'avg_pace_min_km': avg_pace_min_km - } \ No newline at end of file + "total_laps": len(active_splits), + "total_distance_meters": total_distance, + "total_duration_seconds": total_duration, + "total_elevation_gain": total_elevation_gain, + "total_calories": total_calories, + "avg_pace_min_km": avg_pace_min_km, + } + + def _extract_body_composition_data(self, data: Dict) -> List[Dict[str, Any]]: + """Extract body composition entries from weight service response. 
+ + Args: + data: Raw API response from /weight-service/weight/range/ + + Returns: + List of body composition entry dicts + """ + entries = [] + + for summary in data.get("dailyWeightSummaries", []): + latest = summary.get("latestWeight") + if not latest: + continue + + sample_pk = latest.get("samplePk") + if not sample_pk: + continue + + entries.append( + { + "sample_pk": str(sample_pk), + "measurement_date": latest.get("calendarDate"), + "timestamp_gmt": latest.get("timestampGMT"), + "weight_grams": latest.get("weight"), + "bmi": latest.get("bmi"), + "body_fat_percentage": latest.get("bodyFat"), + "body_water_percentage": latest.get("bodyWater"), + "bone_mass_grams": latest.get("boneMass"), + "muscle_mass_grams": latest.get("muscleMass"), + "visceral_fat": latest.get("visceralFat"), + "metabolic_age": latest.get("metabolicAge"), + "physique_rating": latest.get("physiqueRating"), + "source_type": latest.get("sourceType"), + } + ) + + return entries diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index 489c639..f04a568 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -1,17 +1,27 @@ """SQLAlchemy models and enums for health database.""" -from datetime import date, datetime +from datetime import datetime from enum import Enum -from sqlalchemy import Column, Integer, String, Float, Date, DateTime, JSON, Text, Boolean +from sqlalchemy import ( + JSON, + Boolean, + Column, + Date, + DateTime, + Float, + Integer, + String, + Text, +) from sqlalchemy.orm import declarative_base - Base = declarative_base() class MetricType(Enum): """Health metric types that can be stored in the database.""" + DAILY_SUMMARY = "daily_summary" SLEEP = "sleep" ACTIVITIES = "activities" @@ -23,10 +33,12 @@ class MetricType(Enum): RESPIRATION = "respiration" STEPS = "steps" CALORIES = "calories" + BODY_COMPOSITION = "body_composition" class TimeSeries(Base): """High-frequency timeseries data (heart rate, stress, body battery, etc.).""" + 
__tablename__ = "timeseries" user_id = Column(Integer, primary_key=True, nullable=False) @@ -38,6 +50,7 @@ class TimeSeries(Base): class Activity(Base): """Individual activities and workouts with key metrics.""" + __tablename__ = "activities" user_id = Column(Integer, primary_key=True, nullable=False) @@ -72,11 +85,14 @@ class Activity(Base): class ExerciseSet(Base): """Exercise sets from strength training activities.""" + __tablename__ = "exercise_sets" user_id = Column(Integer, primary_key=True, nullable=False) activity_id = Column(String, primary_key=True, nullable=False) - set_order = Column(Integer, primary_key=True, nullable=False) # Order within activity + set_order = Column( + Integer, primary_key=True, nullable=False + ) # Order within activity exercise_category = Column(String) # CURL, BENCH_PRESS, SQUAT, etc. exercise_name = Column(String) @@ -91,11 +107,14 @@ class ExerciseSet(Base): class ActivitySplit(Base): """Lap/split data from cardio activities (running, cycling, walking, etc.).""" + __tablename__ = "activity_splits" user_id = Column(Integer, primary_key=True, nullable=False) activity_id = Column(String, primary_key=True, nullable=False) - lap_index = Column(Integer, primary_key=True, nullable=False) # 1-indexed lap number + lap_index = Column( + Integer, primary_key=True, nullable=False + ) # 1-indexed lap number # Timing start_time = Column(String) # ISO timestamp @@ -139,6 +158,7 @@ class ActivitySplit(Base): class DailyHealthMetric(Base): """Normalized daily health metrics with dedicated columns for efficient querying.""" + __tablename__ = "daily_health_metrics" user_id = Column(Integer, primary_key=True, nullable=False) @@ -190,12 +210,23 @@ class DailyHealthMetric(Base): lowest_respiration_value = Column(Float) highest_respiration_value = Column(Float) + # Sleep enhancements + sleep_score = Column(Integer) # 0-100 overall score + sleep_score_qualifier = Column(String) # POOR, FAIR, GOOD, EXCELLENT + sleep_bedtime = Column(String) # ISO 
timestamp string + sleep_wake_time = Column(String) # ISO timestamp string + sleep_need_minutes = Column(Integer) # Target sleep in minutes + + # Skin temperature (Celsius only - Fahrenheit computed on read) + skin_temp_deviation_c = Column(Float) + created_at = Column(DateTime, default=datetime.utcnow) updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) class SyncStatus(Base): """Sync status tracking for each metric per date.""" + __tablename__ = "sync_status" user_id = Column(Integer, primary_key=True, nullable=False) @@ -204,4 +235,32 @@ class SyncStatus(Base): status = Column(String, nullable=False) synced_at = Column(DateTime) error_message = Column(Text) - created_at = Column(DateTime, default=datetime.utcnow) \ No newline at end of file + created_at = Column(DateTime, default=datetime.utcnow) + + +class BodyComposition(Base): + """Body composition measurements from smart scales.""" + + __tablename__ = "body_composition" + + user_id = Column(Integer, primary_key=True, nullable=False) + sample_pk = Column(String, primary_key=True, nullable=False) # Garmin's unique ID + measurement_date = Column(Date, nullable=False, index=True) + timestamp_gmt = Column(DateTime) + + # Core measurements + weight_grams = Column(Float) + bmi = Column(Float) + body_fat_percentage = Column(Float) + body_water_percentage = Column(Float) + bone_mass_grams = Column(Float) + muscle_mass_grams = Column(Float) + + # Additional metrics (may be null depending on scale) + visceral_fat = Column(Float) + metabolic_age = Column(Integer) + physique_rating = Column(Float) + + # Metadata + source_type = Column(String) # e.g., "INDEX_SCALE" + created_at = Column(DateTime, default=datetime.utcnow) diff --git a/src/garmy/localdb/progress.py b/src/garmy/localdb/progress.py index 4f1125e..3c41200 100644 --- a/src/garmy/localdb/progress.py +++ b/src/garmy/localdb/progress.py @@ -3,6 +3,7 @@ import logging from datetime import date from typing import Optional + from tqdm 
import tqdm @@ -64,8 +65,8 @@ def warning(self, message: str): def error(self, message: str): """Log error message.""" self.logger.error(message) - + def end_sync(self): """End sync progress tracking.""" if self.pbar: - self.pbar.close() \ No newline at end of file + self.pbar.close() diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index bd5cb74..97b92d3 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -1,25 +1,28 @@ """Synchronization manager for Garmin health data.""" import asyncio +import time from datetime import date, datetime, timedelta -from typing import List, Dict, Any, Optional from pathlib import Path +from typing import Any, Dict, List, Optional -from .db import HealthDB +from .activities_iterator import ActivitiesIterator from .config import LocalDBConfig +from .db import HealthDB +from .extractors import DataExtractor from .models import MetricType from .progress import ProgressReporter -from .extractors import DataExtractor -from .activities_iterator import ActivitiesIterator class SyncManager: """Synchronization manager for health metrics.""" - def __init__(self, - db_path: Path = Path("health.db"), - config: Optional[LocalDBConfig] = None, - progress_reporter: Optional[ProgressReporter] = None): + def __init__( + self, + db_path: Path = Path("health.db"), + config: Optional[LocalDBConfig] = None, + progress_reporter: Optional[ProgressReporter] = None, + ): """Initialize sync manager. 
Args: @@ -45,7 +48,7 @@ def initialize(self, email: Optional[str] = None, password: Optional[str] = None password: Garmin account password (optional if tokens are saved) """ try: - from garmy import AuthClient, APIClient + from garmy import APIClient, AuthClient auth_client = AuthClient() @@ -68,9 +71,7 @@ def initialize(self, email: Optional[str] = None, password: Optional[str] = None self.api_client = APIClient(auth_client=auth_client) self.activities_iterator = ActivitiesIterator( - self.api_client, - self.config.sync, - self.progress + self.api_client, self.config.sync, self.progress ) self.activities_iterator.initialize() @@ -80,8 +81,13 @@ def initialize(self, email: Optional[str] = None, password: Optional[str] = None self.progress.error(f"Failed to initialize: {e}") raise - def sync_range(self, user_id: int, start_date: date, end_date: date, - metrics: Optional[List[MetricType]] = None) -> Dict[str, int]: + def sync_range( + self, + user_id: int, + start_date: date, + end_date: date, + metrics: Optional[List[MetricType]] = None, + ) -> Dict[str, int]: """Sync metrics for date range. Args: @@ -99,31 +105,51 @@ def sync_range(self, user_id: int, start_date: date, end_date: date, date_count = abs((end_date - start_date).days) + 1 if date_count > self.config.sync.max_sync_days: - raise ValueError(f"Date range too large: {date_count} days. Maximum allowed: {self.config.sync.max_sync_days} days") + raise ValueError( + f"Date range too large: {date_count} days. 
Maximum allowed: {self.config.sync.max_sync_days} days" + ) if metrics is None: metrics = list(MetricType) - # Separate activities from other metrics - they need different iteration order - non_activities_metrics = [m for m in metrics if m != MetricType.ACTIVITIES] + # Separate special metrics from regular date-by-date metrics + # Activities and body composition are handled separately + non_activities_metrics = [ + m + for m in metrics + if m not in (MetricType.ACTIVITIES, MetricType.BODY_COMPOSITION) + ] has_activities = MetricType.ACTIVITIES in metrics - total_tasks = date_count * len(metrics) + has_body_composition = MetricType.BODY_COMPOSITION in metrics + + # Calculate total tasks for progress reporting + total_tasks = date_count * len(non_activities_metrics) + if has_activities: + total_tasks += date_count + if has_body_composition: + total_tasks += 1 # Body composition is a single batch operation self.progress.start_sync(total_tasks) - stats = {'completed': 0, 'skipped': 0, 'failed': 0, 'total_tasks': total_tasks} + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": total_tasks} try: # Create sync status entries for all dates for current_date in self._date_range(start_date, end_date): for metric_type in metrics: - if not self.db.sync_status_exists(user_id, current_date, metric_type): - self.db.create_sync_status(user_id, current_date, metric_type, 'pending') + if not self.db.sync_status_exists( + user_id, current_date, metric_type + ): + self.db.create_sync_status( + user_id, current_date, metric_type, "pending" + ) # Sync non-activities metrics (oldest to newest is fine) if non_activities_metrics: for current_date in self._date_range(start_date, end_date): - self._sync_date(user_id, current_date, non_activities_metrics, stats) + self._sync_date( + user_id, current_date, non_activities_metrics, stats + ) # Sync activities separately in REVERSE order (newest to oldest) # This matches the ActivitiesIterator which returns activities newest-first 
@@ -135,6 +161,10 @@ def sync_range(self, user_id: int, start_date: date, end_date: date, for current_date in self._date_range(end_date, start_date): self._sync_activities_for_date(user_id, current_date, stats) + # Sync body composition (single batch for entire range) + if has_body_composition: + self._sync_body_composition_batch(user_id, start_date, end_date, stats) + except Exception as e: raise finally: @@ -142,7 +172,13 @@ def sync_range(self, user_id: int, start_date: date, end_date: date, return stats - def _sync_date(self, user_id: int, sync_date: date, metrics: List[MetricType], stats: Dict[str, int]): + def _sync_date( + self, + user_id: int, + sync_date: date, + metrics: List[MetricType], + stats: Dict[str, int], + ): """Sync all non-activities metrics for a single date. Note: Activities are handled separately in sync_range() because they @@ -152,97 +188,137 @@ def _sync_date(self, user_id: int, sync_date: date, metrics: List[MetricType], s try: self._sync_metric_for_date(user_id, sync_date, metric_type, stats) except Exception as e: - self.db.update_sync_status(user_id, sync_date, metric_type, 'failed', str(e)) + self.db.update_sync_status( + user_id, sync_date, metric_type, "failed", str(e) + ) self.progress.task_failed(f"{metric_type.value}", sync_date) - stats['failed'] += 1 - - def _sync_metric_for_date(self, user_id: int, sync_date: date, metric_type: MetricType, stats: Dict[str, int]): + stats["failed"] += 1 + + def _sync_metric_for_date( + self, + user_id: int, + sync_date: date, + metric_type: MetricType, + stats: Dict[str, int], + ): """Sync a single metric for a date.""" if self._is_metric_completed(user_id, metric_type, sync_date): - stats['skipped'] += 1 + stats["skipped"] += 1 self.progress.task_skipped(f"{metric_type.value}", sync_date) return try: data = self.api_client.metrics.get(metric_type.value).get(sync_date) - + # Extract summary/daily data for health metrics table extracted_data = self.extractor.extract_metric_data(data, 
metric_type) summary_stored = False - - + if extracted_data and any(v is not None for v in extracted_data.values()): - self._store_health_metric(user_id, sync_date, metric_type, extracted_data) + self._store_health_metric( + user_id, sync_date, metric_type, extracted_data + ) summary_stored = True - + # Also extract timeseries data for applicable metrics timeseries_stored = False - if metric_type in [MetricType.BODY_BATTERY, MetricType.STRESS, MetricType.HEART_RATE, MetricType.RESPIRATION]: - timeseries_data = self.extractor.extract_timeseries_data(data, metric_type) + if metric_type in [ + MetricType.BODY_BATTERY, + MetricType.STRESS, + MetricType.HEART_RATE, + MetricType.RESPIRATION, + ]: + timeseries_data = self.extractor.extract_timeseries_data( + data, metric_type + ) if timeseries_data: - self.db.store_timeseries_batch(user_id, metric_type, timeseries_data) + self.db.store_timeseries_batch( + user_id, metric_type, timeseries_data + ) timeseries_stored = True - + # Update status based on what was stored if summary_stored or timeseries_stored: - self.db.update_sync_status(user_id, sync_date, metric_type, 'completed') - stats['completed'] += 1 + self.db.update_sync_status(user_id, sync_date, metric_type, "completed") + stats["completed"] += 1 else: - self.db.update_sync_status(user_id, sync_date, metric_type, 'skipped') - stats['skipped'] += 1 + self.db.update_sync_status(user_id, sync_date, metric_type, "skipped") + stats["skipped"] += 1 self.progress.task_complete(f"{metric_type.value}", sync_date) except Exception as e: - self.db.update_sync_status(user_id, sync_date, metric_type, 'failed', str(e)) + self.db.update_sync_status( + user_id, sync_date, metric_type, "failed", str(e) + ) self.progress.task_failed(f"{metric_type.value}", sync_date) - stats['failed'] += 1 + stats["failed"] += 1 - def _sync_activities_for_date(self, user_id: int, sync_date: date, stats: Dict[str, int]): + def _sync_activities_for_date( + self, user_id: int, sync_date: date, stats: 
Dict[str, int] + ): """Sync activities for a specific date.""" if not self.activities_iterator: - stats['failed'] += 1 + stats["failed"] += 1 return try: activities = self.activities_iterator.get_activities_for_date(sync_date) for activity in activities: - activity_data = self.extractor.extract_metric_data(activity, MetricType.ACTIVITIES) - if not activity_data or 'activity_id' not in activity_data: + activity_data = self.extractor.extract_metric_data( + activity, MetricType.ACTIVITIES + ) + if not activity_data or "activity_id" not in activity_data: continue - activity_id = activity_data['activity_id'] + activity_id = activity_data["activity_id"] if self.db.activity_exists(user_id, activity_id): - stats['skipped'] += 1 + stats["skipped"] += 1 continue - activity_data['activity_date'] = sync_date + activity_data["activity_date"] = sync_date self.db.store_activity(user_id, activity_data) - stats['completed'] += 1 + stats["completed"] += 1 # Fetch and store activity details (exercise sets for strength training) - activity_type = activity_data.get('activity_type') + activity_type = activity_data.get("activity_type") self._sync_activity_details(user_id, str(activity_id), activity_type) self.progress.task_complete("activities", sync_date) except Exception as e: self.progress.task_failed("activities", sync_date) - stats['failed'] += 1 + stats["failed"] += 1 # Activity types for fetching specific detail data - STRENGTH_TYPES = ['strength_training', 'indoor_strength_training'] + STRENGTH_TYPES = ["strength_training", "indoor_strength_training"] CARDIO_TYPES = [ - 'running', 'treadmill_running', 'trail_running', 'track_running', - 'cycling', 'indoor_cycling', 'virtual_ride', 'gravel_cycling', 'road_cycling', - 'walking', 'hiking', 'swimming', 'lap_swimming', 'open_water_swimming', - 'elliptical', 'stair_climbing', 'rowing', 'indoor_rowing' + "running", + "treadmill_running", + "trail_running", + "track_running", + "cycling", + "indoor_cycling", + "virtual_ride", + 
"gravel_cycling", + "road_cycling", + "walking", + "hiking", + "swimming", + "lap_swimming", + "open_water_swimming", + "elliptical", + "stair_climbing", + "rowing", + "indoor_rowing", ] - def _sync_activity_details(self, user_id: int, activity_id: str, activity_type: str = None): + def _sync_activity_details( + self, user_id: int, activity_id: str, activity_type: str = None + ): """Sync detailed data for a single activity. For strength training activities, fetches exercise sets (reps, weight, etc.). @@ -256,7 +332,7 @@ def _sync_activity_details(self, user_id: int, activity_id: str, activity_type: activity_type: Activity type key (e.g., 'strength_training', 'running') """ try: - activities_accessor = self.api_client.metrics.get('activities') + activities_accessor = self.api_client.metrics.get("activities") api_called = False # Fetch exercise sets for strength training activities @@ -271,14 +347,17 @@ def _sync_activity_details(self, user_id: int, activity_id: str, activity_type: # Apply rate limiting delay after API calls if api_called: - import time time.sleep(self.config.sync.rate_limit_delay) # Mark activity as having details synced - self.db.update_activity_details(user_id, activity_id, {'details_synced': True}) + self.db.update_activity_details( + user_id, activity_id, {"details_synced": True} + ) except Exception as e: - self.progress.warning(f"Failed to sync details for activity {activity_id}: {e}") + self.progress.warning( + f"Failed to sync details for activity {activity_id}: {e}" + ) def _sync_exercise_sets(self, user_id: int, activity_id: str, activities_accessor): """Sync exercise sets for a strength training activity. 
@@ -300,9 +379,13 @@ def _sync_exercise_sets(self, user_id: int, activity_id: str, activities_accesso self.db.update_activity_details(user_id, activity_id, summary) except Exception as e: - self.progress.warning(f"Failed to sync exercise sets for activity {activity_id}: {e}") + self.progress.warning( + f"Failed to sync exercise sets for activity {activity_id}: {e}" + ) - def _sync_activity_splits(self, user_id: int, activity_id: str, activities_accessor): + def _sync_activity_splits( + self, user_id: int, activity_id: str, activities_accessor + ): """Sync lap/split data for a cardio activity. Args: @@ -317,14 +400,87 @@ def _sync_activity_splits(self, user_id: int, activity_id: str, activities_acces splits_data = activities_accessor.get_activity_splits(activity_id) if splits_data: - splits = self.extractor.extract_activity_splits(splits_data, activity_id) + splits = self.extractor.extract_activity_splits( + splits_data, activity_id + ) if splits: self.db.store_activity_splits(user_id, activity_id, splits) except Exception as e: - self.progress.warning(f"Failed to sync splits for activity {activity_id}: {e}") + self.progress.warning( + f"Failed to sync splits for activity {activity_id}: {e}" + ) + + def _sync_body_composition_batch( + self, user_id: int, start_date: date, end_date: date, stats: Dict[str, int] + ) -> None: + """Sync body composition for entire date range in one API call. + + Body composition uses a range endpoint, so we fetch all data at once + rather than iterating date by date. 
- def backfill_activity_details(self, user_id: int, limit: int = 100) -> Dict[str, int]: + Args: + user_id: User identifier + start_date: Start of sync range + end_date: End of sync range + stats: Stats dictionary to update + """ + if not self.api_client: + self.progress.error("API client not initialized") + stats["failed"] += 1 + return + + try: + self.progress.info( + f"Syncing body composition for {start_date} to {end_date}" + ) + + # Single API call for entire range + endpoint = f"/weight-service/weight/range/{start_date}/{end_date}" + data = self.api_client.connectapi(endpoint) + + if not data: + self.progress.info("No body composition data found") + return + + # Extract entries using the extractor + entries = self.extractor._extract_body_composition_data(data) + + if not entries: + self.progress.info("No body composition entries to store") + return + + # Store each entry + stored = 0 + skipped = 0 + for entry in entries: + sample_pk = entry.get("sample_pk") + if not sample_pk: + continue + + if self.db.body_composition_exists(user_id, sample_pk): + skipped += 1 + else: + self.db.store_body_composition(user_id, entry) + stored += 1 + + stats["completed"] += stored + stats["skipped"] += skipped + + self.progress.info( + f"Body composition: stored {stored}, skipped {skipped} existing" + ) + + # Rate limiting + time.sleep(self.config.sync.rate_limit_delay) + + except Exception as e: + self.progress.error(f"Body composition sync failed: {e}") + stats["failed"] += 1 + + def backfill_activity_details( + self, user_id: int, limit: int = 100 + ) -> Dict[str, int]: """Backfill detailed data for activities that don't have details synced. 
Args: @@ -337,27 +493,31 @@ def backfill_activity_details(self, user_id: int, limit: int = 100) -> Dict[str, if not self.api_client: raise RuntimeError("Must call initialize() before backfilling") - stats = {'completed': 0, 'failed': 0, 'total': 0} + stats = {"completed": 0, "failed": 0, "total": 0} activities = self.db.get_activities_without_details(user_id, limit) - stats['total'] = len(activities) + stats["total"] = len(activities) self.progress.info(f"Backfilling details for {len(activities)} activities") for activity in activities: - activity_id = activity['activity_id'] - activity_type = activity.get('activity_type') + activity_id = activity["activity_id"] + activity_type = activity.get("activity_type") try: self._sync_activity_details(user_id, str(activity_id), activity_type) - stats['completed'] += 1 + stats["completed"] += 1 except Exception as e: self.progress.warning(f"Failed to backfill activity {activity_id}: {e}") - stats['failed'] += 1 + stats["failed"] += 1 - self.progress.info(f"Backfill complete: {stats['completed']} succeeded, {stats['failed']} failed") + self.progress.info( + f"Backfill complete: {stats['completed']} succeeded, {stats['failed']} failed" + ) return stats - def backfill_activity_splits(self, user_id: int, limit: int = 100) -> Dict[str, int]: + def backfill_activity_splits( + self, user_id: int, limit: int = 100 + ) -> Dict[str, int]: """Backfill splits for cardio activities that don't have splits yet. 
This is useful for activities that were synced before the splits feature @@ -373,65 +533,83 @@ def backfill_activity_splits(self, user_id: int, limit: int = 100) -> Dict[str, if not self.api_client: raise RuntimeError("Must call initialize() before backfilling") - stats = {'completed': 0, 'skipped': 0, 'failed': 0, 'total': 0} + stats = {"completed": 0, "skipped": 0, "failed": 0, "total": 0} # Get cardio activities that don't have splits activities = self._get_cardio_activities_without_splits(user_id, limit) - stats['total'] = len(activities) + stats["total"] = len(activities) - self.progress.info(f"Backfilling splits for {len(activities)} cardio activities") + self.progress.info( + f"Backfilling splits for {len(activities)} cardio activities" + ) - activities_accessor = self.api_client.metrics.get('activities') + activities_accessor = self.api_client.metrics.get("activities") for activity in activities: - activity_id = activity['activity_id'] - activity_type = activity.get('activity_type') + activity_id = activity["activity_id"] + activity_type = activity.get("activity_type") # Skip if not a cardio type if activity_type not in self.CARDIO_TYPES: - stats['skipped'] += 1 + stats["skipped"] += 1 continue try: - self._sync_activity_splits(user_id, str(activity_id), activities_accessor) - stats['completed'] += 1 + self._sync_activity_splits( + user_id, str(activity_id), activities_accessor + ) + stats["completed"] += 1 # Rate limiting - import time time.sleep(self.config.sync.rate_limit_delay) except Exception as e: - self.progress.warning(f"Failed to backfill splits for activity {activity_id}: {e}") - stats['failed'] += 1 + self.progress.warning( + f"Failed to backfill splits for activity {activity_id}: {e}" + ) + stats["failed"] += 1 - self.progress.info(f"Splits backfill complete: {stats['completed']} succeeded, {stats['skipped']} skipped, {stats['failed']} failed") + self.progress.info( + f"Splits backfill complete: {stats['completed']} succeeded, 
{stats['skipped']} skipped, {stats['failed']} failed" + ) return stats - def _get_cardio_activities_without_splits(self, user_id: int, limit: int) -> List[Dict[str, Any]]: + def _get_cardio_activities_without_splits( + self, user_id: int, limit: int + ) -> List[Dict[str, Any]]: """Get cardio activities that don't have splits stored yet.""" with self.db.get_session() as session: + from sqlalchemy import and_, exists, not_ + from .models import Activity, ActivitySplit - from sqlalchemy import and_, not_, exists # Subquery to find activities with splits has_splits = exists().where( and_( ActivitySplit.user_id == Activity.user_id, - ActivitySplit.activity_id == Activity.activity_id + ActivitySplit.activity_id == Activity.activity_id, ) ) - activities = session.query(Activity).filter( - and_( - Activity.user_id == user_id, - Activity.activity_type.in_(self.CARDIO_TYPES), - ~has_splits + activities = ( + session.query(Activity) + .filter( + and_( + Activity.user_id == user_id, + Activity.activity_type.in_(self.CARDIO_TYPES), + ~has_splits, + ) ) - ).order_by(Activity.activity_date.desc()).limit(limit).all() + .order_by(Activity.activity_date.desc()) + .limit(limit) + .all() + ) return [self.db._activity_to_dict(a) for a in activities] - def _store_health_metric(self, user_id: int, sync_date: date, metric_type: MetricType, data: Dict): + def _store_health_metric( + self, user_id: int, sync_date: date, metric_type: MetricType, data: Dict + ): """Store health metric data in normalized table.""" if metric_type == MetricType.DAILY_SUMMARY: self.db.store_health_metric(user_id, sync_date, **data) @@ -439,26 +617,37 @@ def _store_health_metric(self, user_id: int, sync_date: date, metric_type: Metri self.db.store_health_metric(user_id, sync_date, **data) elif metric_type == MetricType.TRAINING_READINESS: self.db.store_health_metric( - user_id, sync_date, - training_readiness_score=data.get('score'), - training_readiness_level=data.get('level'), - 
training_readiness_feedback=data.get('feedback') + user_id, + sync_date, + training_readiness_score=data.get("score"), + training_readiness_level=data.get("level"), + training_readiness_feedback=data.get("feedback"), ) elif metric_type == MetricType.HRV: self.db.store_health_metric( - user_id, sync_date, - hrv_weekly_avg=data.get('weekly_avg'), - hrv_last_night_avg=data.get('last_night_avg'), - hrv_status=data.get('status') + user_id, + sync_date, + hrv_weekly_avg=data.get("weekly_avg"), + hrv_last_night_avg=data.get("last_night_avg"), + hrv_status=data.get("status"), ) - elif metric_type in [MetricType.RESPIRATION, MetricType.HEART_RATE, MetricType.STRESS, MetricType.BODY_BATTERY, MetricType.STEPS, MetricType.CALORIES]: + elif metric_type in [ + MetricType.RESPIRATION, + MetricType.HEART_RATE, + MetricType.STRESS, + MetricType.BODY_BATTERY, + MetricType.STEPS, + MetricType.CALORIES, + ]: # Store all extracted data for these metrics self.db.store_health_metric(user_id, sync_date, **data) - def _is_metric_completed(self, user_id: int, metric_type: MetricType, sync_date: date) -> bool: + def _is_metric_completed( + self, user_id: int, metric_type: MetricType, sync_date: date + ) -> bool: """Check if metric is already completed.""" status = self.db.get_sync_status(user_id, sync_date, metric_type) - return status == 'completed' + return status == "completed" def _date_range(self, start_date: date, end_date: date): """Generate date range in either direction.""" @@ -468,24 +657,35 @@ def _date_range(self, start_date: date, end_date: date): yield current current += timedelta(days=step) - def query_health_metrics(self, user_id: int, start_date: date, end_date: date) -> List[Dict]: + def query_health_metrics( + self, user_id: int, start_date: date, end_date: date + ) -> List[Dict]: """Query normalized health metrics for analysis.""" return self.db.get_health_metrics(user_id, start_date, end_date) - def query_activities(self, user_id: int, start_date: date, end_date: date, - 
activity_name: Optional[str] = None) -> List[Dict]: + def query_activities( + self, + user_id: int, + start_date: date, + end_date: date, + activity_name: Optional[str] = None, + ) -> List[Dict]: """Query activities for date range.""" return self.db.get_activities(user_id, start_date, end_date, activity_name) - def query_timeseries(self, user_id: int, metric_type: MetricType, - start_time: datetime, end_time: datetime) -> List[Dict]: + def query_timeseries( + self, + user_id: int, + metric_type: MetricType, + start_time: datetime, + end_time: datetime, + ) -> List[Dict]: """Query timeseries data for time range.""" start_ts = int(start_time.timestamp()) * self.config.database.ms_per_second end_ts = int(end_time.timestamp()) * self.config.database.ms_per_second data = self.db.get_timeseries(user_id, metric_type, start_ts, end_ts) - return [{ - 'timestamp': ts, - 'value': value, - 'metadata': metadata - } for ts, value, metadata in data] + return [ + {"timestamp": ts, "value": value, "metadata": metadata} + for ts, value, metadata in data + ] diff --git a/src/garmy/mcp/__init__.py b/src/garmy/mcp/__init__.py index 07946ca..d1caf95 100644 --- a/src/garmy/mcp/__init__.py +++ b/src/garmy/mcp/__init__.py @@ -7,7 +7,8 @@ try: from .config import MCPConfig from .server import create_mcp_server + __all__ = ["MCPConfig", "create_mcp_server"] except ImportError: # FastMCP not installed - __all__ = [] \ No newline at end of file + __all__ = [] diff --git a/src/garmy/mcp/__main__.py b/src/garmy/mcp/__main__.py index 3ccc60f..bbcfa72 100644 --- a/src/garmy/mcp/__main__.py +++ b/src/garmy/mcp/__main__.py @@ -3,4 +3,4 @@ from .cli import main if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/garmy/mcp/cli.py b/src/garmy/mcp/cli.py index ddca701..d8a9167 100644 --- a/src/garmy/mcp/cli.py +++ b/src/garmy/mcp/cli.py @@ -12,6 +12,7 @@ try: from .server import create_mcp_server except ImportError: + def create_mcp_server(*args, **kwargs): raise 
ImportError( "FastMCP is required for MCP server functionality. " @@ -21,66 +22,75 @@ def create_mcp_server(*args, **kwargs): def validate_database_path(db_path: str) -> Path: """Validate database path exists and is accessible. - + Args: db_path: Path to SQLite database file - + Returns: Validated Path object - + Raises: FileNotFoundError: If database file doesn't exist PermissionError: If database file is not readable """ path = Path(db_path).resolve() - + if not path.exists(): raise FileNotFoundError(f"Database file not found: {path}") - + if not path.is_file(): raise ValueError(f"Path is not a file: {path}") - + if not os.access(path, os.R_OK): raise PermissionError(f"Database file is not readable: {path}") - + return path def cmd_server(args): """Start MCP server with specified configuration.""" # Determine database path - db_path_str = args.database or os.environ.get('GARMY_DB_PATH') - + db_path_str = args.database or os.environ.get("GARMY_DB_PATH") + if not db_path_str: - print("Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", file=sys.stderr) + print( + "Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", + file=sys.stderr, + ) sys.exit(1) - + try: # Validate database path db_path = validate_database_path(db_path_str) - + # Validate configuration parameters if args.max_rows > args.max_rows_absolute: - print(f"Error: --max-rows ({args.max_rows}) cannot exceed --max-rows-absolute ({args.max_rows_absolute})", file=sys.stderr) + print( + f"Error: --max-rows ({args.max_rows}) cannot exceed --max-rows-absolute ({args.max_rows_absolute})", + file=sys.stderr, + ) sys.exit(1) - + if args.max_rows <= 0: print("Error: --max-rows must be positive", file=sys.stderr) sys.exit(1) - + if args.max_rows_absolute > 10000: - print("Error: --max-rows-absolute cannot exceed 10000 for security reasons", file=sys.stderr) + print( + "Error: --max-rows-absolute cannot exceed 10000 
for security reasons", + file=sys.stderr, + ) sys.exit(1) - + # Create config with CLI parameters config = MCPConfig( db_path=db_path, max_rows=args.max_rows, max_rows_absolute=args.max_rows_absolute, enable_query_logging=args.enable_query_logging, - strict_validation=not args.disable_strict_validation + strict_validation=not args.disable_strict_validation, ) - + if args.verbose: print(f"Starting Garmin LocalDB MCP Server...") print(f"Database: {db_path}") @@ -90,12 +100,14 @@ def cmd_server(args): print(f" - Max rows absolute limit: {config.max_rows_absolute}") print(f" - Query logging: {config.enable_query_logging}") print(f" - Strict validation: {config.strict_validation}") - print(f"Available tools: explore_database_structure, get_table_details, execute_sql_query, get_health_summary") - + print( + f"Available tools: explore_database_structure, get_table_details, execute_sql_query, get_health_summary" + ) + # Create and run server with explicit config mcp_server = create_mcp_server(config) mcp_server.run() - + except (FileNotFoundError, PermissionError, ValueError) as e: print(f"Error: {e}", file=sys.stderr) sys.exit(1) @@ -110,55 +122,63 @@ def cmd_server(args): def cmd_info(args): """Show information about the database and MCP server configuration.""" # Determine database path - db_path_str = args.database or os.environ.get('GARMY_DB_PATH') - + db_path_str = args.database or os.environ.get("GARMY_DB_PATH") + if not db_path_str: - print("Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", file=sys.stderr) + print( + "Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", + file=sys.stderr, + ) sys.exit(1) - + try: db_path = validate_database_path(db_path_str) - + # Get database info file_size = db_path.stat().st_size file_size_mb = file_size / (1024 * 1024) - + print("Garmin LocalDB MCP Server Information") print("=" * 40) print(f"Database file: {db_path}") 
print(f"File size: {file_size_mb:.2f} MB") - print(f"Read access: {'✅ Available' if os.access(db_path, os.R_OK) else '❌ Denied'}") - + print( + f"Read access: {'✅ Available' if os.access(db_path, os.R_OK) else '❌ Denied'}" + ) + # Try to get table info try: from .server import DatabaseManager + config = MCPConfig.from_db_path(db_path) db_manager = DatabaseManager(config) - + # Get table information - tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" + tables_query = ( + "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" + ) tables = db_manager.execute_safe_query(tables_query) - + print(f"\\nAvailable tables: {len(tables)}") for table in tables: - table_name = table['name'] + table_name = table["name"] count_query = f"SELECT COUNT(*) as count FROM {table_name}" count_result = db_manager.execute_safe_query(count_query) - row_count = count_result[0]['count'] if count_result else 0 + row_count = count_result[0]["count"] if count_result else 0 print(f" - {table_name}: {row_count:,} records") - + except Exception as e: print(f"\\nWarning: Could not analyze database structure: {e}") - + print("\\nMCP Server Tools:") print(" - explore_database_structure() - Discover available data") print(" - get_table_details(name) - Get table schema and samples") print(" - execute_sql_query(sql, params) - Run SQL queries safely") print(" - get_health_summary(user_id, days) - Quick health overview") - + print("\\nTo start MCP server:") print(f" garmy-mcp server --database {db_path}") - + except Exception as e: print(f"Error: {e}", file=sys.stderr) sys.exit(1) @@ -168,42 +188,46 @@ def cmd_config(args): """Show example configurations for different use cases.""" print("Garmin LocalDB MCP Server - Configuration Examples") print("=" * 50) - + print("\\n📋 Basic Usage:") print(" garmy-mcp server --database health.db") - + print("\\n🏭 Production Configuration (restrictive):") print(" garmy-mcp server --database health.db \\\\") print(" --max-rows 100 
\\\\") print(" --max-rows-absolute 500") - + print("\\n🔧 Development Configuration (permissive with logging):") print(" garmy-mcp server --database health.db \\\\") print(" --max-rows 2000 \\\\") print(" --enable-query-logging \\\\") print(" --verbose") - + print("\\n🐛 Debug Configuration (relaxed validation):") print(" garmy-mcp server --database health.db \\\\") print(" --disable-strict-validation \\\\") print(" --enable-query-logging \\\\") print(" --verbose") - + print("\\n🤖 Claude Desktop Integration:") - print(' {') + print(" {") print(' "mcpServers": {') print(' "garmy-localdb": {') print(' "command": "garmy-mcp",') - print(' "args": ["server", "--database", "/path/to/health.db", "--max-rows", "500"]') - print(' }') - print(' }') - print(' }') - + print( + ' "args": ["server", "--database", "/path/to/health.db", "--max-rows", "500"]' + ) + print(" }") + print(" }") + print(" }") + print("\\n🔐 Security Settings:") print(" --max-rows: Limit rows per query (default: 1000, max: 5000)") print(" --max-rows-absolute: Hard security limit (default: 5000, max: 10000)") print(" --enable-query-logging: Log all SQL queries for debugging") - print(" --disable-strict-validation: Allow relaxed SQL validation (not recommended)") + print( + " --disable-strict-validation: Allow relaxed SQL validation (not recommended)" + ) def create_parser(): @@ -219,83 +243,80 @@ def create_parser(): garmy-mcp config Use 'garmy-mcp --help' for command-specific help. 
- """ + """, ) - + # Subcommands - subparsers = parser.add_subparsers(dest='command', help='Available commands') + subparsers = parser.add_subparsers(dest="command", help="Available commands") subparsers.required = True - + # Server command server_parser = subparsers.add_parser( - 'server', - help='Start MCP server', - description='Start the MCP server with specified configuration' + "server", + help="Start MCP server", + description="Start the MCP server with specified configuration", ) - + server_parser.add_argument( - '--database', '-d', - type=str, - help="Path to Garmin LocalDB SQLite database file" + "--database", "-d", type=str, help="Path to Garmin LocalDB SQLite database file" ) - + server_parser.add_argument( - '--max-rows', + "--max-rows", type=int, default=1000, - help="Maximum number of rows per query (default: 1000, max: 5000)" + help="Maximum number of rows per query (default: 1000, max: 5000)", ) - + server_parser.add_argument( - '--max-rows-absolute', + "--max-rows-absolute", type=int, default=5000, - help="Absolute maximum rows limit for security (default: 5000, max: 10000)" + help="Absolute maximum rows limit for security (default: 5000, max: 10000)", ) - + server_parser.add_argument( - '--enable-query-logging', - action='store_true', - help="Enable SQL query logging for debugging" + "--enable-query-logging", + action="store_true", + help="Enable SQL query logging for debugging", ) - + server_parser.add_argument( - '--disable-strict-validation', - action='store_true', - help="Disable strict SQL validation (not recommended)" + "--disable-strict-validation", + action="store_true", + help="Disable strict SQL validation (not recommended)", ) - + server_parser.add_argument( - '--verbose', '-v', - action='store_true', - help="Enable verbose logging and configuration display" + "--verbose", + "-v", + action="store_true", + help="Enable verbose logging and configuration display", ) - + server_parser.set_defaults(func=cmd_server) - + # Info command 
info_parser = subparsers.add_parser( - 'info', - help='Show database and server information', - description='Display information about the database and available MCP tools' + "info", + help="Show database and server information", + description="Display information about the database and available MCP tools", ) - + info_parser.add_argument( - '--database', '-d', - type=str, - help="Path to Garmin LocalDB SQLite database file" + "--database", "-d", type=str, help="Path to Garmin LocalDB SQLite database file" ) - + info_parser.set_defaults(func=cmd_info) - + # Config command config_parser = subparsers.add_parser( - 'config', - help='Show configuration examples', - description='Display example configurations for different use cases' + "config", + help="Show configuration examples", + description="Display example configurations for different use cases", ) config_parser.set_defaults(func=cmd_config) - + return parser @@ -303,10 +324,10 @@ def main(): """Main entry point for garmy-mcp CLI.""" parser = create_parser() args = parser.parse_args() - + # Execute the selected command args.func(args) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/garmy/mcp/config.py b/src/garmy/mcp/config.py index 23462c6..33caad6 100644 --- a/src/garmy/mcp/config.py +++ b/src/garmy/mcp/config.py @@ -1,40 +1,40 @@ """Configuration management for Garmin LocalDB MCP Server.""" +from dataclasses import dataclass from pathlib import Path from typing import Optional -from dataclasses import dataclass @dataclass class MCPConfig: """Configuration for MCP server behavior and security settings.""" - + # Database settings db_path: Path - + # Query execution limits max_rows: int = 1000 max_rows_absolute: int = 5000 - + # Security settings enable_query_logging: bool = False strict_validation: bool = True - + @classmethod def from_db_path(cls, db_path: Path, **kwargs) -> "MCPConfig": """Create config with database path and optional overrides.""" return 
cls(db_path=db_path, **kwargs) - + def validate(self) -> None: """Validate configuration settings.""" if not self.db_path.exists(): raise FileNotFoundError(f"Database file not found: {self.db_path}") - + if not self.db_path.is_file(): raise ValueError(f"Path is not a file: {self.db_path}") - + if self.max_rows > self.max_rows_absolute: raise ValueError(f"max_rows cannot exceed {self.max_rows_absolute}") - + if self.max_rows <= 0: - raise ValueError("max_rows must be positive") \ No newline at end of file + raise ValueError("max_rows must be positive") diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index e14d0ba..3c80160 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -4,10 +4,10 @@ through the Model Context Protocol with optimized tools for LLM understanding. """ +import logging import os import re import sqlite3 -import logging from pathlib import Path from typing import Any, Dict, List, Optional @@ -19,23 +19,23 @@ "Install with: pip install garmy[mcp] or pip install fastmcp" ) -from .config import MCPConfig from ..localdb.models import MetricType +from .config import MCPConfig class SQLiteConnection: """Secure SQLite connection context manager for read-only access.""" - + def __init__(self, db_path: Path): self.db_path = db_path self.conn = None - + def __enter__(self): """Open read-only SQLite connection.""" self.conn = sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) self.conn.row_factory = sqlite3.Row return self.conn - + def __exit__(self, exc_type, exc_val, exc_tb): """Close connection safely.""" if self.conn: @@ -44,114 +44,127 @@ def __exit__(self, exc_type, exc_val, exc_tb): class QueryValidator: """SQL query validation and sanitization for read-only access.""" - - ALLOWED_STATEMENTS = ('select', 'with') + + ALLOWED_STATEMENTS = ("select", "with") FORBIDDEN_KEYWORDS = { - 'insert', 'update', 'delete', 'drop', 'create', 'alter', - 'pragma', 'attach', 'detach', 'vacuum', 'analyze' + "insert", + "update", + 
"delete", + "drop", + "create", + "alter", + "pragma", + "attach", + "detach", + "vacuum", + "analyze", } - + @classmethod def validate_query(cls, query: str) -> None: """Validate SQL query for read-only access. - + Args: query: SQL query to validate - + Raises: ValueError: If query is not safe for read-only access """ if not query or not query.strip(): raise ValueError("Query cannot be empty") - + query_lower = query.lower().strip() - + # Check if query starts with allowed statement if not any(query_lower.startswith(prefix) for prefix in cls.ALLOWED_STATEMENTS): - allowed = ', '.join(cls.ALLOWED_STATEMENTS).upper() + allowed = ", ".join(cls.ALLOWED_STATEMENTS).upper() raise ValueError(f"Only {allowed} queries are allowed for security") - + # Check for forbidden keywords - query_words = set(re.findall(r'\\b\\w+\\b', query_lower)) + query_words = set(re.findall(r"\\b\\w+\\b", query_lower)) forbidden_found = query_words.intersection(cls.FORBIDDEN_KEYWORDS) if forbidden_found: raise ValueError(f"Forbidden keywords found: {', '.join(forbidden_found)}") - + # Check for multiple statements if cls._contains_multiple_statements(query): raise ValueError("Multiple statements not allowed") - + @staticmethod def _contains_multiple_statements(sql: str) -> bool: """Check if SQL contains multiple statements.""" in_single_quote = False in_double_quote = False - + for char in sql: if char == "'" and not in_double_quote: in_single_quote = not in_single_quote elif char == '"' and not in_single_quote: in_double_quote = not in_double_quote - elif char == ';' and not in_single_quote and not in_double_quote: + elif char == ";" and not in_single_quote and not in_double_quote: return True - + return False - + @staticmethod def add_row_limit(query: str, limit: int = 1000) -> str: """Add LIMIT clause if not present.""" query_lower = query.lower() - if 'limit' not in query_lower: + if "limit" not in query_lower: return f"{query.rstrip(';')} LIMIT {limit}" return query class DatabaseManager: 
"""Manages database connections and basic operations.""" - + def __init__(self, config: MCPConfig): self.config = config self.validator = QueryValidator() self.logger = logging.getLogger("garmy.mcp.database") - + # Configure logging if enabled if config.enable_query_logging and not self.logger.handlers: handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s' - )) + handler.setFormatter( + logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + ) self.logger.addHandler(handler) self.logger.setLevel(logging.INFO) - + def get_connection(self): """Get read-only database connection.""" return SQLiteConnection(self.config.db_path) - - def execute_safe_query(self, query: str, params: Optional[List[Any]] = None) -> List[Dict[str, Any]]: + + def execute_safe_query( + self, query: str, params: Optional[List[Any]] = None + ) -> List[Dict[str, Any]]: """Execute validated query with safety checks.""" # Validate query if self.config.strict_validation: self.validator.validate_query(query) - + # Add row limit original_query = query query = self.validator.add_row_limit(query, self.config.max_rows) - + # Log query if enabled if self.config.enable_query_logging: self.logger.info(f"Executing query: {query}") if params: self.logger.info(f"Parameters: {params}") - + try: with self.get_connection() as conn: cursor = conn.cursor() cursor.execute(query, params or []) results = [dict(row) for row in cursor.fetchall()] - + if self.config.enable_query_logging: self.logger.info(f"Query returned {len(results)} rows") - + return results except sqlite3.Error as e: if self.config.enable_query_logging: @@ -162,34 +175,34 @@ def execute_safe_query(self, query: str, params: Optional[List[Any]] = None) -> # Initialize MCP server def create_mcp_server(config: Optional[MCPConfig] = None) -> FastMCP: """Create and configure the Garmin LocalDB MCP server. - + Args: config: Optional MCP configuration. 
If None, loads from environment. """ if config is None: # Fallback to environment variable for backwards compatibility - if 'GARMY_DB_PATH' not in os.environ: + if "GARMY_DB_PATH" not in os.environ: raise ValueError("GARMY_DB_PATH environment variable must be set") - - db_path = Path(os.environ['GARMY_DB_PATH']) + + db_path = Path(os.environ["GARMY_DB_PATH"]) config = MCPConfig.from_db_path(db_path) - + # Validate configuration config.validate() - + # Initialize components db_manager = DatabaseManager(config) - + # Initialize MCP server with clear, LLM-friendly name mcp = FastMCP("Garmin Health Data Explorer") - + @mcp.tool() def explore_database_structure() -> Dict[str, Any]: """WHEN TO USE: When you need to understand what health data is available. - + This is your starting point for exploring Garmin health data. Use this tool first to see what tables and data types are available before running specific queries. - + Returns: Complete database structure with table descriptions and available data types """ @@ -201,47 +214,47 @@ def explore_database_structure() -> Dict[str, Any]: ORDER BY name """ tables = db_manager.execute_safe_query(tables_query) - table_names = [row['name'] for row in tables] - + table_names = [row["name"] for row in tables] + # Get row counts for each table table_info = {} for table_name in table_names: count_query = f"SELECT COUNT(*) as count FROM {table_name}" count_result = db_manager.execute_safe_query(count_query) - + table_info[table_name] = { - "row_count": count_result[0]['count'], - "description": _get_table_description(table_name) + "row_count": count_result[0]["count"], + "description": _get_table_description(table_name), } - + return { "available_tables": table_info, "metric_types": [mt.value for mt in MetricType], - "usage_tip": "Use 'execute_sql_query' to get specific data from any table, or 'get_table_details' to see column structure" + "usage_tip": "Use 'execute_sql_query' to get specific data from any table, or 
'get_table_details' to see column structure", } except Exception as e: raise ValueError(f"Failed to explore database: {str(e)}") - + @mcp.tool() def get_table_details(table_name: str) -> Dict[str, Any]: """WHEN TO USE: When you need to see the structure and sample data of a specific table. - + Use this after 'explore_database_structure' when you want to understand what columns are available in a table and see examples of the actual data. - + Args: table_name: Name of the health data table (e.g., 'daily_health_metrics', 'activities') - + Returns: Table structure with columns, data types, and sample records """ if not table_name or not table_name.strip(): raise ValueError("Table name cannot be empty") - + # Sanitize table name - if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', table_name): + if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", table_name): raise ValueError("Invalid table name format") - + try: # Verify table exists check_query = """ @@ -249,99 +262,100 @@ def get_table_details(table_name: str) -> Dict[str, Any]: WHERE type='table' AND name=? """ check_result = db_manager.execute_safe_query(check_query, [table_name]) - + if not check_result: available_tables = db_manager.execute_safe_query( "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" ) - table_list = [row['name'] for row in available_tables] - raise ValueError(f"Table '{table_name}' does not exist. Available tables: {', '.join(table_list)}") - + table_list = [row["name"] for row in available_tables] + raise ValueError( + f"Table '{table_name}' does not exist. 
Available tables: {', '.join(table_list)}" + ) + # Get table schema using PRAGMA schema_query = f"PRAGMA table_info({table_name})" with db_manager.get_connection() as conn: cursor = conn.cursor() cursor.execute(schema_query) columns = cursor.fetchall() - - column_info = [{ - 'name': col[1], - 'type': col[2], - 'required': bool(col[3]), - 'is_primary_key': bool(col[5]) - } for col in columns] - + + column_info = [ + { + "name": col[1], + "type": col[2], + "required": bool(col[3]), + "is_primary_key": bool(col[5]), + } + for col in columns + ] + # Get sample data (latest 3 records) sample_query = f"SELECT * FROM {table_name} ORDER BY rowid DESC LIMIT 3" sample_data = db_manager.execute_safe_query(sample_query) - + return { "table_name": table_name, "columns": column_info, "sample_data": sample_data, "description": _get_table_description(table_name), - "usage_tip": f"Use 'execute_sql_query' with SELECT statements to get specific data from {table_name}" + "usage_tip": f"Use 'execute_sql_query' with SELECT statements to get specific data from {table_name}", } - + except Exception as e: raise ValueError(f"Failed to get table details: {str(e)}") - + @mcp.tool() def execute_sql_query( - query: str, - params: Optional[List[Any]] = None + query: str, params: Optional[List[Any]] = None ) -> List[Dict[str, Any]]: """WHEN TO USE: When you need to get specific data using SQL queries. - + This is the main tool for querying any data from the database. Use it to run SELECT queries to analyze health metrics, activities, sync status, or find patterns across any tables. - + IMPORTANT: Only SELECT and WITH queries are allowed for security. - + Args: query: SQL SELECT query (e.g., "SELECT metric_date, total_steps FROM daily_health_metrics WHERE user_id = 1") params: Optional list of parameters for ? 
placeholders in query - + Example queries: - Health metrics: "SELECT metric_date, sleep_duration_hours FROM daily_health_metrics WHERE user_id = 1 ORDER BY metric_date DESC LIMIT 10" - Activities: "SELECT activity_date, activity_name, duration_seconds FROM activities WHERE user_id = 1" - High step days: "SELECT metric_date, total_steps FROM daily_health_metrics WHERE total_steps > 10000" - Timeseries data: "SELECT timestamp, value FROM timeseries WHERE metric_type = 'heart_rate'" - + Returns: List of matching records as dictionaries """ if not query or not query.strip(): raise ValueError("Query cannot be empty") - + try: return db_manager.execute_safe_query(query, params) except Exception as e: raise ValueError(f"Query execution failed: {str(e)}") - + @mcp.tool() - def get_health_summary( - user_id: int = 1, - days: int = 30 - ) -> Dict[str, Any]: + def get_health_summary(user_id: int = 1, days: int = 30) -> Dict[str, Any]: """WHEN TO USE: When you want a quick overview of health metrics without writing SQL. - + This tool provides a ready-made summary of key health metrics over a specified period. Use this for getting an overview before diving into specific analysis. - + Args: user_id: User ID to analyze (default: 1) days: Number of recent days to analyze (max 365, default: 30) - + Returns: Summary statistics including averages for steps, sleep, heart rate, stress, and activity count """ if days > 365: raise ValueError("Days cannot exceed 365") - + if user_id < 1: raise ValueError("User ID must be positive") - + try: # Get health metrics summary summary_query = """ @@ -357,10 +371,12 @@ def get_health_summary( WHERE user_id = ? AND metric_date >= date('now', '-' || ? 
|| ' days') """ - - summary_result = db_manager.execute_safe_query(summary_query, [user_id, days]) + + summary_result = db_manager.execute_safe_query( + summary_query, [user_id, days] + ) summary = summary_result[0] if summary_result else {} - + # Get activity count activity_query = """ SELECT COUNT(*) as activity_count @@ -368,28 +384,30 @@ def get_health_summary( WHERE user_id = ? AND activity_date >= date('now', '-' || ? || ' days') """ - - activity_result = db_manager.execute_safe_query(activity_query, [user_id, days]) + + activity_result = db_manager.execute_safe_query( + activity_query, [user_id, days] + ) if activity_result: - summary['total_activities'] = activity_result[0]['activity_count'] - - summary['analysis_period_days'] = days - summary['user_id'] = user_id - + summary["total_activities"] = activity_result[0]["activity_count"] + + summary["analysis_period_days"] = days + summary["user_id"] = user_id + return summary - + except Exception as e: raise ValueError(f"Failed to generate health summary: {str(e)}") - + @mcp.resource("file://health_data_guide") def health_data_guide() -> str: """Complete guide to understanding and querying Garmin health data. - + This resource provides all the information needed to understand the available health data and how to query it effectively. 
""" return _get_health_data_guide() - + return mcp @@ -399,14 +417,14 @@ def _get_table_description(table_name: str) -> str: "daily_health_metrics": "Daily health summaries including steps, sleep, heart rate, stress, and other key metrics", "timeseries": "High-frequency data like heart rate readings throughout the day, stress levels, body battery", "activities": "Individual workouts and physical activities with performance metrics", - "sync_status": "System table tracking data synchronization status (usually not needed for health analysis)" + "sync_status": "System table tracking data synchronization status (usually not needed for health analysis)", } return descriptions.get(table_name, "Health data table") def _get_health_data_guide() -> str: """Get comprehensive guide for health data analysis.""" - return ''' + return """ # Garmin Health Data Analysis Guide ## Quick Start @@ -454,7 +472,7 @@ def _get_health_data_guide() -> str: 2. **Correlation Analysis**: Look for relationships between sleep, stress, and performance 3. **Goal Tracking**: Monitor progress toward targets (steps, sleep duration) 4. 
**Activity Analysis**: Understand workout patterns and performance - '''.strip() + """.strip() # Legacy function for backwards compatibility @@ -475,4 +493,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/garmy/metrics/activities.py b/src/garmy/metrics/activities.py index 94bf72f..56f0b6b 100644 --- a/src/garmy/metrics/activities.py +++ b/src/garmy/metrics/activities.py @@ -326,6 +326,7 @@ def get_activity_details(self, activity_id: Union[int, str]) -> Dict[str, Any]: raise except Exception as e: from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching activity details", endpoint, {}) def get_exercise_sets(self, activity_id: Union[int, str]) -> Dict[str, Any]: @@ -345,6 +346,7 @@ def get_exercise_sets(self, activity_id: Union[int, str]) -> Dict[str, Any]: raise except Exception as e: from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching exercise sets", endpoint, {}) def get_activity_splits(self, activity_id: Union[int, str]) -> Dict[str, Any]: @@ -364,6 +366,7 @@ def get_activity_splits(self, activity_id: Union[int, str]) -> Dict[str, Any]: raise except Exception as e: from ..core.utils import handle_api_exception + return handle_api_exception(e, "fetching activity splits", endpoint, {}) # For compatibility with MetricAccessor interface diff --git a/src/garmy/metrics/body_composition.py b/src/garmy/metrics/body_composition.py new file mode 100644 index 0000000..7332b93 --- /dev/null +++ b/src/garmy/metrics/body_composition.py @@ -0,0 +1,231 @@ +"""Body Composition Data Module. + +================== + +This module provides direct access to Garmin body composition data from the Connect API. +Data includes weight, body fat percentage, muscle mass, bone mass, and other metrics +from compatible smart scales. 
+ +Example: + >>> from garmy import AuthClient, APIClient + >>> auth_client = AuthClient() + >>> api_client = APIClient(auth_client=auth_client) + >>> auth_client.login("email@example.com", "password") + >>> + >>> # Get body composition for date range + >>> from datetime import date, timedelta + >>> end = date.today() + >>> start = end - timedelta(days=30) + >>> bc = api_client.metrics.get("body_composition").get_range(start, end) + >>> for m in bc.measurements: + ... print(f"{m.calendar_date}: {m.weight_kg:.1f} kg, {m.body_fat}% body fat") + +Data Source: + Garmin Connect API endpoint: /weight-service/weight/range +""" + +from dataclasses import dataclass, field +from datetime import date as date_type +from datetime import timedelta +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from ..core.base import MetricConfig + +if TYPE_CHECKING: + pass + + +@dataclass +class BodyCompositionEntry: + """Single body composition measurement from a smart scale. + + Attributes: + sample_pk: Garmin's unique identifier for this measurement + calendar_date: Date of the measurement (YYYY-MM-DD) + weight: Weight in grams + bmi: Body Mass Index + body_fat: Body fat percentage + body_water: Body water percentage + bone_mass: Bone mass in grams + muscle_mass: Muscle mass in grams + visceral_fat: Visceral fat rating + metabolic_age: Estimated metabolic age + physique_rating: Physique rating score + source_type: Source device type (e.g., "INDEX_SCALE") + timestamp_gmt: Unix timestamp in milliseconds (GMT) + """ + + sample_pk: str + calendar_date: str + weight: float # grams + bmi: Optional[float] = None + body_fat: Optional[float] = None # percentage + body_water: Optional[float] = None # percentage + bone_mass: Optional[float] = None # grams + muscle_mass: Optional[float] = None # grams + visceral_fat: Optional[float] = None + metabolic_age: Optional[int] = None + physique_rating: Optional[float] = None + source_type: Optional[str] = None + timestamp_gmt: 
Optional[int] = None + + @property + def weight_kg(self) -> float: + """Get weight in kilograms.""" + return self.weight / 1000 if self.weight else 0 + + @property + def weight_lbs(self) -> float: + """Get weight in pounds.""" + return self.weight_kg * 2.20462 + + @property + def bone_mass_kg(self) -> Optional[float]: + """Get bone mass in kilograms.""" + return self.bone_mass / 1000 if self.bone_mass else None + + @property + def muscle_mass_kg(self) -> Optional[float]: + """Get muscle mass in kilograms.""" + return self.muscle_mass / 1000 if self.muscle_mass else None + + @property + def bmi_category(self) -> Optional[str]: + """Get BMI category based on WHO classification. + + Returns: + Category string: "underweight", "normal", "overweight", or "obese" + """ + if not self.bmi: + return None + if self.bmi < 18.5: + return "underweight" + elif self.bmi < 25: + return "normal" + elif self.bmi < 30: + return "overweight" + return "obese" + + +@dataclass +class BodyComposition: + """Body composition data from Garmin weight service. + + Contains all body composition measurements for a date range, + along with average values. 
+ + Attributes: + measurements: List of body composition entries + total_average: Average values across the date range + """ + + measurements: List[BodyCompositionEntry] = field(default_factory=list) + total_average: Optional[Dict[str, Any]] = None + + def __str__(self) -> str: + """Format body composition for human-readable display.""" + if not self.measurements: + return "No body composition data available" + + lines = [f"Body Composition: {len(self.measurements)} measurement(s)"] + + # Show most recent + latest = self.measurements[-1] + lines.append(f" Latest ({latest.calendar_date}):") + lines.append( + f" Weight: {latest.weight_kg:.1f} kg ({latest.weight_lbs:.1f} lbs)" + ) + if latest.body_fat: + lines.append(f" Body Fat: {latest.body_fat}%") + if latest.muscle_mass_kg: + lines.append(f" Muscle Mass: {latest.muscle_mass_kg:.1f} kg") + if latest.bmi: + lines.append(f" BMI: {latest.bmi:.1f} ({latest.bmi_category})") + + return "\n".join(lines) + + @property + def latest(self) -> Optional[BodyCompositionEntry]: + """Get the most recent measurement.""" + return self.measurements[-1] if self.measurements else None + + +def parse_body_composition(data: Dict[str, Any]) -> BodyComposition: + """Parse body composition API response. 
+ + Args: + data: Raw API response from /weight-service/weight/range/ + + Returns: + BodyComposition object with parsed measurements + """ + measurements = [] + + for summary in data.get("dailyWeightSummaries", []): + latest = summary.get("latestWeight") + if latest and latest.get("samplePk"): + measurements.append( + BodyCompositionEntry( + sample_pk=str(latest.get("samplePk", "")), + calendar_date=latest.get("calendarDate", ""), + weight=latest.get("weight", 0), + bmi=latest.get("bmi"), + body_fat=latest.get("bodyFat"), + body_water=latest.get("bodyWater"), + bone_mass=latest.get("boneMass"), + muscle_mass=latest.get("muscleMass"), + visceral_fat=latest.get("visceralFat"), + metabolic_age=latest.get("metabolicAge"), + physique_rating=latest.get("physiqueRating"), + source_type=latest.get("sourceType"), + timestamp_gmt=latest.get("timestampGMT"), + ) + ) + + return BodyComposition( + measurements=measurements, total_average=data.get("totalAverage") + ) + + +def build_body_composition_endpoint( + start_date: Union[date_type, str, None] = None, + end_date: Union[date_type, str, None] = None, + api_client: Any = None, + **kwargs: Any, +) -> str: + """Build body composition endpoint with date range. 
+ + Args: + start_date: Start of date range (default: 30 days ago) + end_date: End of date range (default: today) + api_client: API client (unused but required by interface) + **kwargs: Additional arguments (unused) + + Returns: + Endpoint URL string + """ + if end_date is None: + end_date = date_type.today() + elif isinstance(end_date, str): + end_date = date_type.fromisoformat(end_date) + + if start_date is None: + start_date = end_date - timedelta(days=30) + elif isinstance(start_date, str): + start_date = date_type.fromisoformat(start_date) + + return f"/weight-service/weight/range/{start_date}/{end_date}" + + +# MetricConfig for auto-discovery +METRIC_CONFIG = MetricConfig( + endpoint="", + metric_class=BodyComposition, + parser=parse_body_composition, + endpoint_builder=build_body_composition_endpoint, + requires_user_id=False, + description="Body composition data including weight, body fat, muscle mass from smart scales", + version="1.0", +) + +__metric_config__ = METRIC_CONFIG diff --git a/src/garmy/metrics/sleep.py b/src/garmy/metrics/sleep.py index 67bfb31..18f2b73 100644 --- a/src/garmy/metrics/sleep.py +++ b/src/garmy/metrics/sleep.py @@ -124,9 +124,11 @@ def total_sleep_duration_hours(self) -> Optional[float]: @property def sleep_efficiency_percentage(self) -> Optional[float]: """Calculate sleep efficiency (sleep time / time in bed).""" - if (self.sleep_end_timestamp_local is None or - self.sleep_start_timestamp_local is None or - self.sleep_time_seconds is None): + if ( + self.sleep_end_timestamp_local is None + or self.sleep_start_timestamp_local is None + or self.sleep_time_seconds is None + ): return None time_in_bed = ( self.sleep_end_timestamp_local - self.sleep_start_timestamp_local @@ -150,6 +152,9 @@ class Sleep: wellness_epoch_spo2_data_dto_list: SpO2 readings throughout the night (list of dicts) wellness_epoch_respiration_data_dto_list: Respiration readings throughout the night (list of dicts) + skin_temp_data_exists: Whether skin 
temperature data is available + skin_temp_deviation_c: Skin temperature deviation in Celsius + skin_temp_deviation_f: Skin temperature deviation in Fahrenheit Example: >>> sleep = garmy.sleep.get() @@ -171,6 +176,11 @@ class Sleep: default_factory=list ) + # Top-level skin temperature fields + skin_temp_data_exists: bool = False + skin_temp_deviation_c: Optional[float] = None + skin_temp_deviation_f: Optional[float] = None + def __str__(self) -> str: """Format sleep data for human-readable display.""" lines = [] @@ -266,17 +276,34 @@ def movement_readings_count(self) -> int: return len(self.sleep_movement) -# Create parser using factory function for nested summary + raw data -parse_sleep_data = create_nested_summary_parser( - Sleep, - SleepSummary, - "daily_sleep_dto", - [ - "sleep_movement", - "wellness_epoch_spo2_data_dto_list", - "wellness_epoch_respiration_data_dto_list", - ], -) +def parse_sleep_data(data: Dict[str, Any]) -> Sleep: + """Parse sleep data including top-level skin temperature fields. + + This custom parser extends the standard nested summary parser to also + capture skin temperature data that exists at the top level of the + API response (not nested in dailySleepDTO). 
+ """ + # Use existing parser for nested summary and base data + base_parser = create_nested_summary_parser( + Sleep, + SleepSummary, + "daily_sleep_dto", + [ + "sleep_movement", + "wellness_epoch_spo2_data_dto_list", + "wellness_epoch_respiration_data_dto_list", + ], + ) + + # Parse base data + sleep = base_parser(data) + + # Add top-level skin temp fields (these are outside dailySleepDTO) + sleep.skin_temp_data_exists = data.get("skinTempDataExists", False) + sleep.skin_temp_deviation_c = data.get("avgSkinTempDeviationC") + sleep.skin_temp_deviation_f = data.get("avgSkinTempDeviationF") + + return sleep def build_sleep_endpoint( diff --git a/tests/test_auth_init.py b/tests/test_auth_init.py index 1939dd5..306ba52 100644 --- a/tests/test_auth_init.py +++ b/tests/test_auth_init.py @@ -108,9 +108,9 @@ def test_no_extra_exports(self): ] for name in internal_names: - assert not hasattr(auth_module, name), ( - f"Internal {name} should not be exported" - ) + assert not hasattr( + auth_module, name + ), f"Internal {name} should not be exported" def test_import_style_consistency(self): """Test imports follow consistent patterns.""" @@ -205,9 +205,9 @@ def test_function_signatures_consistency(self): actual_resume_params = list(resume_sig.parameters.keys()) for param in expected_resume_params: - assert param in actual_resume_params, ( - f"resume_login missing parameter: {param}" - ) + assert ( + param in actual_resume_params + ), f"resume_login missing parameter: {param}" def test_type_hints_available(self): """Test that type hints are available for main functions.""" diff --git a/tests/test_core_http_client.py b/tests/test_core_http_client.py index 8dd52ea..e238978 100644 --- a/tests/test_core_http_client.py +++ b/tests/test_core_http_client.py @@ -80,7 +80,9 @@ def test_create_session_basic(self): "garmy.core.http_client.Session" ) as mock_session_class, patch.object( client, "_get_default_headers", return_value={"Test": "Header"} - ), patch.object(client, 
"_create_retry_strategy") as mock_retry: + ), patch.object( + client, "_create_retry_strategy" + ) as mock_retry: mock_session = Mock() mock_session_class.return_value = mock_session mock_retry_strategy = Mock() @@ -101,7 +103,9 @@ def test_create_session_with_adapters(self): "garmy.core.http_client.Session" ) as mock_session_class, patch.object( client, "_get_default_headers", return_value={} - ), patch.object(client, "_create_retry_strategy") as mock_retry, patch( + ), patch.object( + client, "_create_retry_strategy" + ) as mock_retry, patch( "garmy.core.http_client.HTTPAdapter" ) as mock_adapter_class: mock_session = Mock() diff --git a/tests/test_metrics_comprehensive.py b/tests/test_metrics_comprehensive.py index 6454c8c..d09dacf 100644 --- a/tests/test_metrics_comprehensive.py +++ b/tests/test_metrics_comprehensive.py @@ -1471,9 +1471,9 @@ def test_all_metrics_are_dataclasses(self): for name in metrics_module.__all__: metric_class = getattr(metrics_module, name) - assert hasattr(metric_class, "__dataclass_fields__"), ( - f"{name} is not a dataclass" - ) + assert hasattr( + metric_class, "__dataclass_fields__" + ), f"{name} is not a dataclass" def test_metric_configs_have_required_fields(self): """Test all metric configs have required fields.""" diff --git a/tests/test_metrics_remaining.py b/tests/test_metrics_remaining.py index f87876f..78aed39 100644 --- a/tests/test_metrics_remaining.py +++ b/tests/test_metrics_remaining.py @@ -509,9 +509,9 @@ def test_all_exported_classes_are_dataclasses(self): ] for cls in classes: - assert hasattr(cls, "__dataclass_fields__"), ( - f"{cls.__name__} should be a dataclass" - ) + assert hasattr( + cls, "__dataclass_fields__" + ), f"{cls.__name__} should be a dataclass" def test_metric_configs_consistency(self): """Test metric configurations are consistent across modules.""" @@ -534,27 +534,27 @@ def test_metric_configs_consistency(self): module = __import__(f"garmy.metrics.{module_name}", fromlist=[""]) # Check required 
attributes exist - assert hasattr(module, "METRIC_CONFIG"), ( - f"{module_name} missing METRIC_CONFIG" - ) - assert hasattr(module, "__metric_config__"), ( - f"{module_name} missing __metric_config__" - ) + assert hasattr( + module, "METRIC_CONFIG" + ), f"{module_name} missing METRIC_CONFIG" + assert hasattr( + module, "__metric_config__" + ), f"{module_name} missing __metric_config__" # Check config is valid config = module.METRIC_CONFIG - assert isinstance(config, MetricConfig), ( - f"{module_name} METRIC_CONFIG not MetricConfig instance" - ) - assert config.metric_class is not None, ( - f"{module_name} missing metric_class" - ) + assert isinstance( + config, MetricConfig + ), f"{module_name} METRIC_CONFIG not MetricConfig instance" + assert ( + config.metric_class is not None + ), f"{module_name} missing metric_class" assert config.version is not None, f"{module_name} missing version" # Check consistency between METRIC_CONFIG and __metric_config__ - assert module.__metric_config__ == config, ( - f"{module_name} config exports inconsistent" - ) + assert ( + module.__metric_config__ == config + ), f"{module_name} config exports inconsistent" except ImportError: pytest.skip(f"Module {module_name} not implemented yet") @@ -584,9 +584,9 @@ def test_endpoint_or_builder_present(self): has_endpoint = bool(config.endpoint) has_builder = bool(config.endpoint_builder) - assert has_endpoint or has_builder, ( - f"{module_name} needs endpoint or endpoint_builder" - ) + assert ( + has_endpoint or has_builder + ), f"{module_name} needs endpoint or endpoint_builder" except ImportError: pytest.skip(f"Module {module_name} not implemented yet") From 86d49c694a49c6bf59f780508bd6a912210af718 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Wed, 3 Dec 2025 21:09:35 -0800 Subject: [PATCH 09/26] Populate activity distance from splits data - Update _sync_activity_splits to calculate and store total distance, calories, and elevation from splits into the activities table - Fix 
calculate_splits_summary to include INTERVAL splits (used by treadmill activities), not just ACTIVE - Add backfill_activity_distance_from_splits method for existing data --- src/garmy/localdb/extractors.py | 6 +- src/garmy/localdb/sync.py | 120 ++++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+), 1 deletion(-) diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 4de7a97..21cac65 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -545,7 +545,11 @@ def calculate_splits_summary(self, splits: List[Dict[str, Any]]) -> Dict[str, An Returns: Dict with total_laps and aggregated metrics """ - active_splits = [s for s in splits if s.get("intensity_type") == "ACTIVE"] + # Include ACTIVE, INTERVAL, and other non-REST splits + # (treadmill runs use INTERVAL, outdoor runs use ACTIVE) + active_splits = [ + s for s in splits if s.get("intensity_type") not in (None, "REST") + ] if not active_splits: return {"total_laps": len(splits)} diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index 97b92d3..8fe7967 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -406,6 +406,31 @@ def _sync_activity_splits( if splits: self.db.store_activity_splits(user_id, activity_id, splits) + # Calculate totals from splits and update activity record + summary = self.extractor.calculate_splits_summary(splits) + activity_updates = {} + + # Update distance if available from splits + if summary.get("total_distance_meters"): + activity_updates["distance_meters"] = summary[ + "total_distance_meters" + ] + + # Update calories if available from splits + if summary.get("total_calories"): + activity_updates["calories"] = int(summary["total_calories"]) + + # Update elevation if available from splits + if summary.get("total_elevation_gain"): + activity_updates["elevation_gain"] = summary[ + "total_elevation_gain" + ] + + if activity_updates: + self.db.update_activity_details( + user_id, 
activity_id, activity_updates + ) + except Exception as e: self.progress.warning( f"Failed to sync splits for activity {activity_id}: {e}" @@ -574,6 +599,101 @@ def backfill_activity_splits( ) return stats + def backfill_activity_distance_from_splits(self, user_id: int) -> Dict[str, int]: + """Backfill distance/calories/elevation for activities from existing splits. + + This updates activities that have splits stored but don't have distance + populated in the main activities table. Useful for fixing activities + synced before this feature was added. + + Args: + user_id: User identifier + + Returns: + Dict with update statistics + """ + stats = {"updated": 0, "skipped": 0, "failed": 0, "total": 0} + + # Get activities that have splits but NULL distance + activities = self._get_activities_with_splits_missing_distance(user_id) + stats["total"] = len(activities) + + self.progress.info( + f"Backfilling distance for {len(activities)} activities from splits" + ) + + for activity in activities: + activity_id = activity["activity_id"] + try: + # Get splits for this activity + splits = self.db.get_activity_splits(user_id, activity_id) + if not splits: + stats["skipped"] += 1 + continue + + # Calculate totals from splits + summary = self.extractor.calculate_splits_summary(splits) + activity_updates = {} + + if summary.get("total_distance_meters"): + activity_updates["distance_meters"] = summary["total_distance_meters"] + + if summary.get("total_calories"): + activity_updates["calories"] = int(summary["total_calories"]) + + if summary.get("total_elevation_gain"): + activity_updates["elevation_gain"] = summary["total_elevation_gain"] + + if activity_updates: + self.db.update_activity_details(user_id, activity_id, activity_updates) + stats["updated"] += 1 + else: + stats["skipped"] += 1 + + except Exception as e: + self.progress.warning( + f"Failed to backfill distance for activity {activity_id}: {e}" + ) + stats["failed"] += 1 + + self.progress.info( + f"Distance backfill 
complete: {stats['updated']} updated, " + f"{stats['skipped']} skipped, {stats['failed']} failed" + ) + return stats + + def _get_activities_with_splits_missing_distance( + self, user_id: int + ) -> List[Dict[str, Any]]: + """Get activities that have splits but no distance in main table.""" + with self.db.get_session() as session: + from sqlalchemy import and_, exists + + from .models import Activity, ActivitySplit + + # Subquery to find activities with splits + has_splits = exists().where( + and_( + ActivitySplit.user_id == Activity.user_id, + ActivitySplit.activity_id == Activity.activity_id, + ) + ) + + activities = ( + session.query(Activity) + .filter( + and_( + Activity.user_id == user_id, + Activity.distance_meters.is_(None), + has_splits, + ) + ) + .order_by(Activity.activity_date.desc()) + .all() + ) + + return [self.db._activity_to_dict(a) for a in activities] + def _get_cardio_activities_without_splits( self, user_id: int, limit: int ) -> List[Dict[str, Any]]: From 08257d0b5850189f7dba7f3f15cc0403fb618496 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Thu, 4 Dec 2025 10:23:10 -0800 Subject: [PATCH 10/26] Add .DS_Store to .gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 85e6e08..4ae819d 100644 --- a/.gitignore +++ b/.gitignore @@ -128,6 +128,7 @@ celerybeat.pid *.sage.py # Environments +.DS_Store .env .venv env/ @@ -213,4 +214,4 @@ log.txt *.broken test_*coverage*.py test_remaining*.py -test_final*.py \ No newline at end of file +test_final*.py From 8b484de01237887507726f68a8b372ec1f865544 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Thu, 4 Dec 2025 11:48:12 -0800 Subject: [PATCH 11/26] Add multi-profile support via --profile-path and GARMY_PROFILE_PATH Enable isolated profile directories with separate tokens and databases for multi-user scenarios. CLI argument takes priority over env var, with ~/.garmy/ as default fallback. Backwards compatible. 
--- README.md | 98 ++++++++++++++++++++++++++++++++++++++- src/garmy/auth/client.py | 23 ++++++++- src/garmy/core/config.py | 27 +++++++++++ src/garmy/localdb/cli.py | 94 +++++++++++++++++++++++++++++++++---- src/garmy/localdb/sync.py | 9 +++- src/garmy/mcp/cli.py | 78 ++++++++++++++++++++++++++++--- 6 files changed, 309 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 1f360d6..ab8ba43 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ An AI-powered Python library for Garmin Connect API designed specifically for he - **🏥 Health Analytics**: Advanced data analysis capabilities for fitness and wellness insights - **📊 Rich Metrics**: Complete access to sleep, heart rate, stress, training readiness, and more - **💾 Local Database**: Built-in SQLite database for local health data storage and sync +- **👥 Multi-Profile Support**: Manage multiple Garmin accounts with isolated profile directories - **🖥️ CLI Tools**: Command-line interfaces for data synchronization and MCP server management - **🤖 MCP Server**: Model Context Protocol server for AI assistant integration (Claude Desktop) - **⚡ High Performance**: Optimized for high-performance AI applications @@ -89,6 +90,37 @@ garmy-mcp info --database health.db garmy-mcp config ``` +### Multi-Profile Support + +Garmy supports multiple Garmin accounts through profile directories. Each profile contains its own authentication tokens and database. 
+ +```bash +# Using --profile-path (note: must come before the subcommand) +garmy-sync --profile-path ~/profiles/user1 sync --last-days 7 +garmy-sync --profile-path ~/profiles/user2 sync --last-days 7 + +# Using environment variable +export GARMY_PROFILE_PATH=~/profiles/user1 +garmy-sync sync --last-days 7 + +# MCP server with profile +garmy-mcp server --profile-path ~/profiles/user1 +``` + +**Profile Directory Structure:** +``` +~/profiles/user1/ +├── oauth1_token.json # Garmin OAuth1 credentials +├── oauth2_token.json # Garmin OAuth2 credentials +├── health.db # User's health database +└── logs/ # Sync logs +``` + +**Priority Order:** +1. `--profile-path` CLI argument (highest) +2. `GARMY_PROFILE_PATH` environment variable +3. `~/.garmy/` default directory (fallback) + ### AI Assistant Integration (Claude Desktop) Add to your Claude Desktop configuration (`~/.claude_desktop_config.json`): @@ -104,6 +136,35 @@ Add to your Claude Desktop configuration (`~/.claude_desktop_config.json`): } ``` +**Using profiles with Claude Desktop:** + +```json +{ + "mcpServers": { + "garmy-localdb": { + "command": "garmy-mcp", + "args": ["server", "--profile-path", "/path/to/profiles/user1", "--max-rows", "500"] + } + } +} +``` + +Or using environment variables: + +```json +{ + "mcpServers": { + "garmy-localdb": { + "command": "garmy-mcp", + "args": ["server", "--max-rows", "500"], + "env": { + "GARMY_PROFILE_PATH": "/path/to/profiles/user1" + } + } + } +} +``` + Now ask Claude: *"What health data do I have available? 
Analyze my sleep patterns over the last month."* ## 📊 Available Health Metrics @@ -187,14 +248,25 @@ def health_agent(): garmy-sync sync --last-days 90 # Sync 3 months of data garmy-mcp server --database health.db # Start MCP server # Use Claude Desktop or Python to analyze trends, correlations, patterns + +# Multi-user household analysis +garmy-sync --profile-path ~/profiles/user1 sync --last-days 90 +garmy-sync --profile-path ~/profiles/user2 sync --last-days 90 +# Each profile has isolated credentials and database ``` -### For Health Researchers +### For Health Researchers ```python # Large-scale health data collection from garmy.localdb import SyncManager +from pathlib import Path -sync_manager = SyncManager(db_path="research_data.db") +# Using a profile directory for tokens and database +profile_path = Path.home() / "profiles" / "researcher" +sync_manager = SyncManager( + db_path=profile_path / "health.db", + token_dir=str(profile_path) # Tokens stored in profile directory +) sync_manager.initialize(email, password) # Collect comprehensive health dataset @@ -213,6 +285,28 @@ stats = sync_manager.sync_range( - **🛡️ Query Validation**: SQL injection prevention and query limits - **🔑 Secure Auth**: OAuth token management with automatic refresh - **🚫 No Data Sharing**: Health data never leaves your local environment +- **👥 Profile Isolation**: Each profile has separate credentials and database + +## ⚙️ Configuration + +### Environment Variables + +| Variable | Description | +|----------|-------------| +| `GARMY_PROFILE_PATH` | Profile directory path (contains tokens and database) | +| `GARMY_DB_PATH` | Database file path (for MCP server, overridden by `--database`) | + +### Shell Configuration Example + +Add to your `~/.zshrc` or `~/.bashrc`: + +```bash +# Set default profile +export GARMY_PROFILE_PATH="$HOME/Services/Garmy/profiles/default" + +# Optional: Activate venv alias +alias garmy-activate="source ~/Services/Garmy/.venv/bin/activate" +``` ## 🧪 Examples 
diff --git a/src/garmy/auth/client.py b/src/garmy/auth/client.py index aa08d8e..f287b60 100644 --- a/src/garmy/auth/client.py +++ b/src/garmy/auth/client.py @@ -9,6 +9,7 @@ """ import json +import os from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional, Tuple, Union @@ -104,10 +105,24 @@ class TokenFileManager: def __init__(self, token_dir: Optional[str] = None): """Initialize the token file manager. + Token directory resolution priority: + 1. Explicit token_dir parameter + 2. GARMY_PROFILE_PATH environment variable + 3. Default: ~/.garmy/ + Args: token_dir: Directory path for storing tokens. """ - self.token_dir = token_dir or str(Path.home() / ".garmy") + if token_dir: + self.token_dir = token_dir + else: + # Check environment variable for profile path + profile_path = os.getenv("GARMY_PROFILE_PATH") + if profile_path: + self.token_dir = str(Path(profile_path).expanduser()) + else: + # Default fallback + self.token_dir = str(Path.home() / ".garmy") def load_tokens(self) -> Tuple[Optional[OAuth1Token], Optional[OAuth2Token]]: """Load authentication tokens from persistent storage. @@ -324,7 +339,11 @@ def __init__( domain: Garmin domain to authenticate with timeout: Request timeout in seconds retries: Number of retry attempts for failed requests - token_dir: Directory path for storing tokens (defaults to ~/.garmy) + token_dir: Directory path for storing tokens. + Resolution priority: + 1. This parameter if provided + 2. GARMY_PROFILE_PATH environment variable + 3. 
Default: ~/.garmy/ """ self.domain = domain diff --git a/src/garmy/core/config.py b/src/garmy/core/config.py index 7c87372..90c64d5 100644 --- a/src/garmy/core/config.py +++ b/src/garmy/core/config.py @@ -242,6 +242,10 @@ class GarmyConfig: oauth_consumer_key: str = "" oauth_consumer_secret: str = "" + # Profile path for multi-user support + # When set, tokens and database are stored/loaded from this directory + profile_path: Optional[str] = None + @classmethod def from_environment(cls) -> "GarmyConfig": """Create configuration from environment variables.""" @@ -271,6 +275,7 @@ def safe_int(env_var: str, default: int) -> int: oauth_consumer_secret=os.getenv( "GARMY_OAUTH_CONSUMER_SECRET", OAuthCredentials.DEFAULT_CONSUMER_SECRET ), + profile_path=os.getenv("GARMY_PROFILE_PATH"), ) @@ -400,3 +405,25 @@ def get_app_headers(platform: str = "ios") -> dict: "accept": "*/*", "accept-encoding": "gzip, deflate, br", } + + +def get_profile_path() -> Optional[str]: + """Get the profile path from configuration. + + The profile path is a directory containing user-specific data: + - OAuth tokens (oauth1_token.json, oauth2_token.json) + - Health database (health.db) + - Logs (logs/) + + Returns: + Profile path string if set, None otherwise. + When None, components should fall back to their defaults (e.g., ~/.garmy/). 
+ + Environment Variables: + GARMY_PROFILE_PATH: Path to profile directory + + Example: + export GARMY_PROFILE_PATH="/path/to/profiles/user1" + """ + config = get_config() + return config.profile_path diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 0116ee7..54f18ce 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -3,10 +3,11 @@ import argparse import getpass +import os import sys from datetime import date, timedelta from pathlib import Path -from typing import List, Optional +from typing import List, Optional, Tuple from .config import LocalDBConfig from .models import MetricType @@ -14,6 +15,47 @@ from .sync import SyncManager +def resolve_paths(args: argparse.Namespace) -> Tuple[Path, Optional[str]]: + """Resolve database path and token directory from arguments. + + Priority for profile path: + 1. --profile-path CLI argument + 2. GARMY_PROFILE_PATH environment variable + + When profile path is set: + - db_path = /health.db + - token_dir = / + + When profile path is not set: + - db_path = --db-path argument or default (health.db) + - token_dir = None (uses default ~/.garmy/) + + Args: + args: Parsed command-line arguments + + Returns: + Tuple of (db_path, token_dir) + """ + # Check for profile path (CLI takes precedence over env var) + profile_path = args.profile_path + if profile_path is None: + env_profile = os.getenv("GARMY_PROFILE_PATH") + if env_profile: + profile_path = Path(env_profile).expanduser() + + if profile_path: + # Use profile path for both database and tokens + profile_path = Path(profile_path).expanduser() + db_path = profile_path / "health.db" + token_dir = str(profile_path) + return db_path, token_dir + else: + # Use individual paths + db_path = args.db_path if args.db_path else Path("health.db") + token_dir = None # Will use default ~/.garmy/ + return db_path, token_dir + + def parse_date(date_str: str) -> date: """Parse date string in YYYY-MM-DD format.""" try: @@ -66,6 +108,9 @@ def 
get_credentials() -> tuple[str, str]: def cmd_sync(args) -> int: """Execute sync command.""" try: + # Resolve paths from profile or individual arguments + db_path, token_dir = resolve_paths(args) + # Determine date range if args.last_days: end_date = date.today() @@ -78,6 +123,7 @@ def cmd_sync(args) -> int: start_date = end_date - timedelta(days=6) print(f"Syncing data from {start_date} to {end_date}") + print(f"Database: {db_path}") # Setup progress reporter progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") @@ -85,7 +131,10 @@ def cmd_sync(args) -> int: # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, config=config, progress_reporter=progress_reporter + db_path=db_path, + config=config, + progress_reporter=progress_reporter, + token_dir=token_dir, ) # Try to initialize with saved tokens first @@ -133,7 +182,9 @@ def cmd_status(args) -> int: try: from .db import HealthDB - db = HealthDB(args.db_path) + # Resolve paths from profile or individual arguments + db_path, _ = resolve_paths(args) + db = HealthDB(db_path) # Show overall statistics with db.get_session() as session: @@ -229,7 +280,9 @@ def cmd_reset(args) -> int: try: from .db import HealthDB - db = HealthDB(args.db_path) + # Resolve paths from profile or individual arguments + db_path, _ = resolve_paths(args) + db = HealthDB(db_path) with db.get_session() as session: from .models import SyncStatus @@ -272,13 +325,19 @@ def cmd_reset(args) -> int: def cmd_backfill(args) -> int: """Backfill activity details for existing activities.""" try: + # Resolve paths from profile or individual arguments + db_path, token_dir = resolve_paths(args) + # Setup progress reporter progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, config=config, progress_reporter=progress_reporter + db_path=db_path, + config=config, + 
progress_reporter=progress_reporter, + token_dir=token_dir, ) # Try to initialize with saved tokens first @@ -317,13 +376,19 @@ def cmd_backfill(args) -> int: def cmd_backfill_splits(args) -> int: """Backfill splits for cardio activities.""" try: + # Resolve paths from profile or individual arguments + db_path, token_dir = resolve_paths(args) + # Setup progress reporter progress_reporter = ProgressReporter(use_tqdm=args.progress == "tqdm") # Initialize sync manager config = LocalDBConfig() manager = SyncManager( - db_path=args.db_path, config=config, progress_reporter=progress_reporter + db_path=db_path, + config=config, + progress_reporter=progress_reporter, + token_dir=token_dir, ) # Try to initialize with saved tokens first @@ -368,19 +433,32 @@ def create_parser() -> argparse.ArgumentParser: %(prog)s sync --last-days 7 # Sync last 7 days %(prog)s sync --date-range 2024-01-01 2024-01-31 # Sync date range %(prog)s sync --metrics DAILY_SUMMARY,SLEEP # Sync specific metrics + %(prog)s sync --profile-path ~/profiles/user1 # Sync using profile directory %(prog)s status # Show sync status %(prog)s reset --force # Reset failed records %(prog)s backfill --limit 50 # Backfill activity details %(prog)s backfill-splits --limit 50 # Backfill splits for cardio + +Environment Variables: + GARMY_PROFILE_PATH Profile directory path (contains tokens and database) """, ) # Global options + parser.add_argument( + "--profile-path", + type=Path, + default=None, + help="Path to profile directory containing tokens and database. " + "When set, uses /health.db for database and / for tokens. " + "Can also be set via GARMY_PROFILE_PATH environment variable.", + ) parser.add_argument( "--db-path", type=Path, - default=Path("health.db"), - help="Path to SQLite database file (default: health.db)", + default=None, + help="Path to SQLite database file. If --profile-path is set, this is ignored. 
" + "(default: health.db in current directory or profile directory)", ) parser.add_argument( "--user-id", diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index 8fe7967..aaf9d31 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -22,6 +22,7 @@ def __init__( db_path: Path = Path("health.db"), config: Optional[LocalDBConfig] = None, progress_reporter: Optional[ProgressReporter] = None, + token_dir: Optional[str] = None, ): """Initialize sync manager. @@ -29,9 +30,15 @@ def __init__( db_path: Path to SQLite database file. config: Configuration object. progress_reporter: Custom progress reporter. + token_dir: Directory path for authentication tokens. + Resolution priority: + 1. This parameter if provided + 2. GARMY_PROFILE_PATH environment variable + 3. Default: ~/.garmy/ """ self.db_path = db_path self.config = config if config is not None else LocalDBConfig() + self.token_dir = token_dir self.db = HealthDB(db_path, self.config.database) self.progress = progress_reporter or ProgressReporter() @@ -50,7 +57,7 @@ def initialize(self, email: Optional[str] = None, password: Optional[str] = None try: from garmy import APIClient, AuthClient - auth_client = AuthClient() + auth_client = AuthClient(token_dir=self.token_dir) # Check if already authenticated with saved tokens if not auth_client.is_authenticated: diff --git a/src/garmy/mcp/cli.py b/src/garmy/mcp/cli.py index d8a9167..78bc780 100644 --- a/src/garmy/mcp/cli.py +++ b/src/garmy/mcp/cli.py @@ -20,6 +20,39 @@ def create_mcp_server(*args, **kwargs): ) +def resolve_db_path(args) -> Optional[str]: + """Resolve database path from arguments. + + Priority: + 1. --database CLI argument + 2. --profile-path CLI argument (derives /health.db) + 3. GARMY_PROFILE_PATH environment variable (derives /health.db) + 4. 
GARMY_DB_PATH environment variable + + Args: + args: Parsed command-line arguments + + Returns: + Database path string or None if not resolvable + """ + # Priority 1: Explicit database path + if args.database: + return args.database + + # Priority 2: Profile path from CLI + profile_path = getattr(args, "profile_path", None) + if profile_path: + return str(Path(profile_path).expanduser() / "health.db") + + # Priority 3: Profile path from environment + env_profile = os.environ.get("GARMY_PROFILE_PATH") + if env_profile: + return str(Path(env_profile).expanduser() / "health.db") + + # Priority 4: Database path from environment + return os.environ.get("GARMY_DB_PATH") + + def validate_database_path(db_path: str) -> Path: """Validate database path exists and is accessible. @@ -49,12 +82,13 @@ def validate_database_path(db_path: str) -> Path: def cmd_server(args): """Start MCP server with specified configuration.""" - # Determine database path - db_path_str = args.database or os.environ.get("GARMY_DB_PATH") + # Resolve database path from arguments/environment + db_path_str = resolve_db_path(args) if not db_path_str: print( - "Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", + "Error: Database path must be provided via --database, --profile-path, " + "or GARMY_DB_PATH/GARMY_PROFILE_PATH environment variable", file=sys.stderr, ) sys.exit(1) @@ -121,12 +155,13 @@ def cmd_server(args): def cmd_info(args): """Show information about the database and MCP server configuration.""" - # Determine database path - db_path_str = args.database or os.environ.get("GARMY_DB_PATH") + # Resolve database path from arguments/environment + db_path_str = resolve_db_path(args) if not db_path_str: print( - "Error: Database path must be provided via --database argument or GARMY_DB_PATH environment variable", + "Error: Database path must be provided via --database, --profile-path, " + "or GARMY_DB_PATH/GARMY_PROFILE_PATH environment variable", 
file=sys.stderr, ) sys.exit(1) @@ -191,6 +226,7 @@ def cmd_config(args): print("\\n📋 Basic Usage:") print(" garmy-mcp server --database health.db") + print(" garmy-mcp server --profile-path ~/Services/Garmy/profiles/user1") print("\\n🏭 Production Configuration (restrictive):") print(" garmy-mcp server --database health.db \\\\") @@ -214,6 +250,17 @@ def cmd_config(args): print(' "mcpServers": {') print(' "garmy-localdb": {') print(' "command": "garmy-mcp",') + print( + ' "args": ["server", "--profile-path", "/path/to/profiles/user1", "--max-rows", "500"]' + ) + print(" }") + print(" }") + print(" }") + print("\\n Or with direct database path:") + print(" {") + print(' "mcpServers": {') + print(' "garmy-localdb": {') + print(' "command": "garmy-mcp",') print( ' "args": ["server", "--database", "/path/to/health.db", "--max-rows", "500"]' ) @@ -239,9 +286,14 @@ def create_parser(): epilog=""" Examples: garmy-mcp server --database health.db + garmy-mcp server --profile-path ~/profiles/user1 garmy-mcp info --database health.db garmy-mcp config - + +Environment Variables: + GARMY_PROFILE_PATH Profile directory path (derives database as /health.db) + GARMY_DB_PATH Direct database path + Use 'garmy-mcp --help' for command-specific help. """, ) @@ -257,6 +309,12 @@ def create_parser(): description="Start the MCP server with specified configuration", ) + server_parser.add_argument( + "--profile-path", + type=str, + help="Path to profile directory. Database path derived as /health.db. " + "Can also be set via GARMY_PROFILE_PATH environment variable.", + ) server_parser.add_argument( "--database", "-d", type=str, help="Path to Garmin LocalDB SQLite database file" ) @@ -303,6 +361,12 @@ def create_parser(): description="Display information about the database and available MCP tools", ) + info_parser.add_argument( + "--profile-path", + type=str, + help="Path to profile directory. Database path derived as /health.db. 
" + "Can also be set via GARMY_PROFILE_PATH environment variable.", + ) info_parser.add_argument( "--database", "-d", type=str, help="Path to Garmin LocalDB SQLite database file" ) From 2ed2a7166d89fcc6593189302e4fe78e65290011 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Tue, 9 Dec 2025 18:33:24 -0800 Subject: [PATCH 12/26] Add HTTP/SSE network transport support to MCP server - Add --transport option (stdio, http, sse) with stdio as default - Add --host and --port options for network transports - Add security warnings when exposing server on all interfaces - Validate transport and port settings in MCPConfig - Update CLI help with network transport examples and SSH tunneling --- src/garmy/mcp/cli.py | 173 +++++++++++++++++++++++++++++++++------- src/garmy/mcp/config.py | 19 +++++ 2 files changed, 162 insertions(+), 30 deletions(-) diff --git a/src/garmy/mcp/cli.py b/src/garmy/mcp/cli.py index 78bc780..ffecb56 100644 --- a/src/garmy/mcp/cli.py +++ b/src/garmy/mcp/cli.py @@ -116,6 +116,45 @@ def cmd_server(args): ) sys.exit(1) + # Validate transport configuration + if args.transport in ("http", "sse"): + # Validate port range + if args.port < 1 or args.port > 65535: + print("Error: --port must be between 1 and 65535", file=sys.stderr) + sys.exit(1) + + # Warn about privileged ports + if args.port < 1024: + print( + "Warning: Ports below 1024 may require root privileges", + file=sys.stderr, + ) + + # Security warning for network exposure + if args.host == "0.0.0.0": + print("=" * 60, file=sys.stderr) + print("⚠️ SECURITY WARNING", file=sys.stderr) + print("=" * 60, file=sys.stderr) + print( + "Binding to 0.0.0.0 exposes the server to your entire network.", + file=sys.stderr, + ) + print( + "The MCP protocol does not provide authentication or encryption.", + file=sys.stderr, + ) + print( + "Anyone on your network can access your health data.", + file=sys.stderr, + ) + print("", file=sys.stderr) + print("Recommendations:", file=sys.stderr) + print(" - Use firewall 
rules to restrict access", file=sys.stderr) + print(" - Use SSH tunneling for remote access", file=sys.stderr) + print(" - Consider using 127.0.0.1 for localhost-only", file=sys.stderr) + print("=" * 60, file=sys.stderr) + print("", file=sys.stderr) + # Create config with CLI parameters config = MCPConfig( db_path=db_path, @@ -123,24 +162,56 @@ def cmd_server(args): max_rows_absolute=args.max_rows_absolute, enable_query_logging=args.enable_query_logging, strict_validation=not args.disable_strict_validation, + transport=args.transport, + host=args.host, + port=args.port, ) if args.verbose: - print(f"Starting Garmin LocalDB MCP Server...") + print("Starting Garmin LocalDB MCP Server...") print(f"Database: {db_path}") - print(f"Configuration:") + print("Configuration:") print(f" - Read-only access: enabled") print(f" - Max rows per query: {config.max_rows}") print(f" - Max rows absolute limit: {config.max_rows_absolute}") print(f" - Query logging: {config.enable_query_logging}") print(f" - Strict validation: {config.strict_validation}") + print(f" - Transport: {config.transport}") + + # Show network details for non-stdio transports + if config.transport in ("http", "sse"): + print(f" - Host: {config.host}") + print(f" - Port: {config.port}") + print("") + print("Network Access:") + print(f" - Server URL: http://{config.host}:{config.port}") + if config.transport == "sse": + print(f" - SSE Endpoint: http://{config.host}:{config.port}/sse") + print("") + if config.host == "127.0.0.1": + print("Note: Server bound to localhost only (127.0.0.1)") + print(" Use --host 0.0.0.0 for network access") + elif config.host == "0.0.0.0": + print("WARNING: Server exposed on ALL network interfaces") + print("") + print( - f"Available tools: explore_database_structure, get_table_details, execute_sql_query, get_health_summary" + "Available tools: explore_database_structure, get_table_details, " + "execute_sql_query, get_health_summary" ) # Create and run server with explicit config 
mcp_server = create_mcp_server(config) - mcp_server.run() + + # Run with transport configuration + if config.transport == "stdio": + mcp_server.run() + else: + mcp_server.run( + transport=config.transport, + host=config.host, + port=config.port, + ) except (FileNotFoundError, PermissionError, ValueError) as e: print(f"Error: {e}", file=sys.stderr) @@ -224,57 +295,62 @@ def cmd_config(args): print("Garmin LocalDB MCP Server - Configuration Examples") print("=" * 50) - print("\\n📋 Basic Usage:") + print("\n📋 Basic Usage (stdio - for Claude Desktop):") print(" garmy-mcp server --database health.db") print(" garmy-mcp server --profile-path ~/Services/Garmy/profiles/user1") - print("\\n🏭 Production Configuration (restrictive):") + print("\n🌐 Network Usage (HTTP transport):") + print(" # Localhost only (secure)") + print(" garmy-mcp server --database health.db --transport http --port 8000") + print("") + print(" # Local network (WARNING: no authentication)") + print(" garmy-mcp server --database health.db --transport http \\\\") + print(" --host 0.0.0.0 --port 8080") + + print("\n🔒 Secure Remote Access (via SSH tunnel):") + print(" # On remote server:") + print(" garmy-mcp server --database health.db --transport http --port 8000") + print("") + print(" # On local machine:") + print(" ssh -L 8000:localhost:8000 user@remote-server") + print(" # Then connect to http://localhost:8000 locally") + + print("\n🏭 Production Configuration (restrictive):") print(" garmy-mcp server --database health.db \\\\") print(" --max-rows 100 \\\\") print(" --max-rows-absolute 500") - print("\\n🔧 Development Configuration (permissive with logging):") + print("\n🔧 Development Configuration (permissive with logging):") print(" garmy-mcp server --database health.db \\\\") + print(" --transport http --port 8000 \\\\") print(" --max-rows 2000 \\\\") print(" --enable-query-logging \\\\") print(" --verbose") - print("\\n🐛 Debug Configuration (relaxed validation):") - print(" garmy-mcp server 
--database health.db \\\\") - print(" --disable-strict-validation \\\\") - print(" --enable-query-logging \\\\") - print(" --verbose") - - print("\\n🤖 Claude Desktop Integration:") - print(" {") - print(' "mcpServers": {') - print(' "garmy-localdb": {') - print(' "command": "garmy-mcp",') - print( - ' "args": ["server", "--profile-path", "/path/to/profiles/user1", "--max-rows", "500"]' - ) - print(" }") - print(" }") - print(" }") - print("\\n Or with direct database path:") + print("\n🤖 Claude Desktop Integration (stdio):") print(" {") print(' "mcpServers": {') print(' "garmy-localdb": {') print(' "command": "garmy-mcp",') print( - ' "args": ["server", "--database", "/path/to/health.db", "--max-rows", "500"]' + ' "args": ["server", "--profile-path", "/path/to/profiles/user1"]' ) print(" }") print(" }") print(" }") - print("\\n🔐 Security Settings:") + print("\n🔐 Security Settings:") print(" --max-rows: Limit rows per query (default: 1000, max: 5000)") print(" --max-rows-absolute: Hard security limit (default: 5000, max: 10000)") print(" --enable-query-logging: Log all SQL queries for debugging") - print( - " --disable-strict-validation: Allow relaxed SQL validation (not recommended)" - ) + + print("\n🌐 Network Transport Settings:") + print(" --transport: stdio (default), http (recommended), or sse (legacy)") + print(" --host: IP address to bind (default: 127.0.0.1)") + print(" --port: Port number (default: 8000)") + print("") + print(" WARNING: Network transports expose health data without authentication.") + print(" Use localhost binding (127.0.0.1) and SSH tunneling for remote access.") def create_parser(): @@ -285,8 +361,17 @@ def create_parser(): formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: + # Local stdio transport (default, for Claude Desktop) garmy-mcp server --database health.db garmy-mcp server --profile-path ~/profiles/user1 + + # HTTP network transport + garmy-mcp server --database health.db --transport http --port 8000 + + 
# HTTP on local network (all interfaces) + garmy-mcp server --database health.db --transport http --host 0.0.0.0 --port 8080 + + # Info and config commands garmy-mcp info --database health.db garmy-mcp config @@ -294,6 +379,10 @@ def create_parser(): GARMY_PROFILE_PATH Profile directory path (derives database as /health.db) GARMY_DB_PATH Direct database path +Security Note: + Network transports (http/sse) expose health data without authentication. + Use localhost binding (127.0.0.1) and SSH tunneling for remote access. + Use 'garmy-mcp --help' for command-specific help. """, ) @@ -352,6 +441,30 @@ def create_parser(): help="Enable verbose logging and configuration display", ) + server_parser.add_argument( + "--transport", + type=str, + choices=["stdio", "http", "sse"], + default="stdio", + help="Transport protocol: stdio (default, for Claude Desktop), " + "http (recommended for network), or sse (legacy network)", + ) + + server_parser.add_argument( + "--host", + type=str, + default="127.0.0.1", + help="Host address to bind for network transports (default: 127.0.0.1). 
" + "Use 0.0.0.0 to expose on all interfaces (WARNING: no authentication)", + ) + + server_parser.add_argument( + "--port", + type=int, + default=8000, + help="Port for network transports (default: 8000, range: 1024-65535 recommended)", + ) + server_parser.set_defaults(func=cmd_server) # Info command diff --git a/src/garmy/mcp/config.py b/src/garmy/mcp/config.py index 33caad6..a1a2547 100644 --- a/src/garmy/mcp/config.py +++ b/src/garmy/mcp/config.py @@ -20,6 +20,11 @@ class MCPConfig: enable_query_logging: bool = False strict_validation: bool = True + # Network/transport settings + transport: str = "stdio" + host: str = "127.0.0.1" + port: int = 8000 + @classmethod def from_db_path(cls, db_path: Path, **kwargs) -> "MCPConfig": """Create config with database path and optional overrides.""" @@ -38,3 +43,17 @@ def validate(self) -> None: if self.max_rows <= 0: raise ValueError("max_rows must be positive") + + # Validate transport settings + valid_transports = ("stdio", "http", "sse") + if self.transport not in valid_transports: + raise ValueError( + f"Invalid transport: {self.transport}. " + f"Must be one of: {', '.join(valid_transports)}" + ) + + if self.transport in ("http", "sse"): + if self.port < 1 or self.port > 65535: + raise ValueError( + f"Port must be between 1 and 65535, got {self.port}" + ) From 39010843119d3be531069fb1b3ac2311503b82a4 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Tue, 9 Dec 2025 19:53:19 -0800 Subject: [PATCH 13/26] Add sync_health_data tool to MCP server Enable AI assistants to fetch fresh data from Garmin Connect via the --enable-sync flag. The new tool supports syncing up to 30 days of data and requires valid saved authentication tokens. 
Changes: - Add enable_sync and profile_path settings to MCPConfig - Add sync_health_data tool with last_days, metrics, and user_id params - Add --enable-sync CLI flag to garmy-mcp server command --- src/garmy/mcp/cli.py | 50 +++++++++++-- src/garmy/mcp/config.py | 13 +++- src/garmy/mcp/server.py | 162 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 219 insertions(+), 6 deletions(-) diff --git a/src/garmy/mcp/cli.py b/src/garmy/mcp/cli.py index ffecb56..b7642df 100644 --- a/src/garmy/mcp/cli.py +++ b/src/garmy/mcp/cli.py @@ -155,6 +155,13 @@ def cmd_server(args): print("=" * 60, file=sys.stderr) print("", file=sys.stderr) + # Resolve profile path for sync support + profile_path = None + if hasattr(args, "profile_path") and args.profile_path: + profile_path = Path(args.profile_path).expanduser() + elif os.environ.get("GARMY_PROFILE_PATH"): + profile_path = Path(os.environ["GARMY_PROFILE_PATH"]).expanduser() + # Create config with CLI parameters config = MCPConfig( db_path=db_path, @@ -165,6 +172,8 @@ def cmd_server(args): transport=args.transport, host=args.host, port=args.port, + enable_sync=args.enable_sync, + profile_path=profile_path, ) if args.verbose: @@ -195,10 +204,21 @@ def cmd_server(args): print("WARNING: Server exposed on ALL network interfaces") print("") - print( - "Available tools: explore_database_structure, get_table_details, " - "execute_sql_query, get_health_summary" - ) + tools_list = [ + "explore_database_structure", + "get_table_details", + "execute_sql_query", + "get_health_summary", + ] + if config.enable_sync: + tools_list.append("sync_health_data") + print(f" - Sync enabled: yes") + if config.profile_path: + print(f" - Token directory: {config.profile_path}") + else: + print(f" - Sync enabled: no (use --enable-sync to enable)") + + print(f"Available tools: {', '.join(tools_list)}") # Create and run server with explicit config mcp_server = create_mcp_server(config) @@ -333,7 +353,7 @@ def cmd_config(args): print(' "garmy-localdb": 
{') print(' "command": "garmy-mcp",') print( - ' "args": ["server", "--profile-path", "/path/to/profiles/user1"]' + ' "args": ["server", "--profile-path", "/path/to/profiles/user1", "--enable-sync"]' ) print(" }") print(" }") @@ -352,6 +372,16 @@ def cmd_config(args): print(" WARNING: Network transports expose health data without authentication.") print(" Use localhost binding (127.0.0.1) and SSH tunneling for remote access.") + print("\n🔄 Sync Settings:") + print(" --enable-sync: Enable the sync_health_data tool") + print(" --profile-path: Path to profile directory (contains tokens and database)") + print("") + print(" The sync tool allows AI assistants to fetch fresh data from Garmin Connect.") + print(" Requires valid saved authentication tokens - run 'garmy-sync sync' first.") + print("") + print(" Example with sync enabled:") + print(" garmy-mcp server --profile-path ~/profiles/user1 --enable-sync") + def create_parser(): """Create argument parser with subcommands.""" @@ -365,6 +395,9 @@ def create_parser(): garmy-mcp server --database health.db garmy-mcp server --profile-path ~/profiles/user1 + # With sync enabled (allows fetching fresh data from Garmin) + garmy-mcp server --profile-path ~/profiles/user1 --enable-sync + # HTTP network transport garmy-mcp server --database health.db --transport http --port 8000 @@ -465,6 +498,13 @@ def create_parser(): help="Port for network transports (default: 8000, range: 1024-65535 recommended)", ) + server_parser.add_argument( + "--enable-sync", + action="store_true", + help="Enable the sync_health_data tool to fetch fresh data from Garmin Connect. 
" + "Requires valid saved authentication tokens (run 'garmy-sync sync' first to authenticate).", + ) + server_parser.set_defaults(func=cmd_server) # Info command diff --git a/src/garmy/mcp/config.py b/src/garmy/mcp/config.py index a1a2547..f9164a2 100644 --- a/src/garmy/mcp/config.py +++ b/src/garmy/mcp/config.py @@ -1,6 +1,6 @@ """Configuration management for Garmin LocalDB MCP Server.""" -from dataclasses import dataclass +from dataclasses import dataclass, field from pathlib import Path from typing import Optional @@ -25,11 +25,22 @@ class MCPConfig: host: str = "127.0.0.1" port: int = 8000 + # Sync settings + enable_sync: bool = False + profile_path: Optional[Path] = None + @classmethod def from_db_path(cls, db_path: Path, **kwargs) -> "MCPConfig": """Create config with database path and optional overrides.""" return cls(db_path=db_path, **kwargs) + @property + def token_dir(self) -> Optional[str]: + """Get token directory from profile path if set.""" + if self.profile_path: + return str(self.profile_path) + return None + def validate(self) -> None: """Validate configuration settings.""" if not self.db_path.exists(): diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 3c80160..4939059 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -2,12 +2,14 @@ Provides secure, read-only access to synchronized Garmin health data through the Model Context Protocol with optimized tools for LLM understanding. +Optionally supports syncing data from Garmin Connect when enabled. 
""" import logging import os import re import sqlite3 +from datetime import date, timedelta from pathlib import Path from typing import Any, Dict, List, Optional @@ -408,9 +410,169 @@ def health_data_guide() -> str: """ return _get_health_data_guide() + # Only add sync tool if sync is enabled + if config.enable_sync: + _register_sync_tool(mcp, config) + return mcp +def _register_sync_tool(mcp: FastMCP, config: MCPConfig) -> None: + """Register the sync tool with the MCP server. + + Args: + mcp: The FastMCP server instance + config: MCP configuration with sync settings + """ + from ..localdb.config import LocalDBConfig + from ..localdb.progress import ProgressReporter + from ..localdb.sync import SyncManager + + # Create a simple progress reporter that collects messages + class MCPProgressReporter(ProgressReporter): + """Progress reporter that collects messages for MCP response.""" + + def __init__(self): + super().__init__(use_tqdm=False) + self.messages: List[str] = [] + + def info(self, message: str) -> None: + self.messages.append(f"[INFO] {message}") + + def warning(self, message: str) -> None: + self.messages.append(f"[WARNING] {message}") + + def error(self, message: str) -> None: + self.messages.append(f"[ERROR] {message}") + + def task_complete(self, metric: str, sync_date: date) -> None: + pass # Don't log individual task completions to reduce noise + + def task_failed(self, metric: str, sync_date: date) -> None: + self.messages.append(f"[FAILED] {metric} for {sync_date}") + + def task_skipped(self, metric: str, sync_date: date) -> None: + pass # Don't log skips to reduce noise + + @mcp.tool() + def sync_health_data( + last_days: int = 7, + metrics: Optional[str] = None, + user_id: int = 1, + ) -> Dict[str, Any]: + """WHEN TO USE: When you need to fetch fresh data from Garmin Connect. + + This tool syncs health data from Garmin Connect API to the local database. + Use this when you need the latest data that may not be in the database yet. 
+ + IMPORTANT: Requires valid saved authentication tokens. Will fail if tokens + are expired or missing - user must run 'garmy-sync sync' manually first to + authenticate. + + Args: + last_days: Number of days to sync, counting back from today (default: 7, max: 30) + metrics: Comma-separated list of metrics to sync (default: all). + Available: DAILY_SUMMARY, SLEEP, HEART_RATE, STEPS, STRESS, + BODY_BATTERY, HRV, CALORIES, RESPIRATION, TRAINING_READINESS, + ACTIVITIES, BODY_COMPOSITION + user_id: User ID for database records (default: 1) + + Returns: + Sync statistics including completed, skipped, and failed counts + """ + # Validate parameters + if last_days < 1: + raise ValueError("last_days must be at least 1") + if last_days > 30: + raise ValueError( + "last_days cannot exceed 30 for MCP sync. " + "For larger syncs, use 'garmy-sync sync' CLI directly." + ) + if user_id < 1: + raise ValueError("user_id must be positive") + + # Parse metrics if provided + sync_metrics: Optional[List[MetricType]] = None + if metrics: + sync_metrics = [] + for name in metrics.split(","): + name = name.strip().upper() + try: + sync_metrics.append(MetricType[name]) + except KeyError: + available = ", ".join([m.name for m in MetricType]) + raise ValueError( + f"Invalid metric: {name}. Available: {available}" + ) + + # Calculate date range + end_date = date.today() + start_date = end_date - timedelta(days=last_days - 1) + + # Create progress reporter + progress = MCPProgressReporter() + + try: + # Initialize sync manager + localdb_config = LocalDBConfig() + manager = SyncManager( + db_path=config.db_path, + config=localdb_config, + progress_reporter=progress, + token_dir=config.token_dir, + ) + + # Initialize with saved tokens only (no interactive prompts) + try: + manager.initialize() + except RuntimeError as e: + return { + "success": False, + "error": "Authentication required", + "message": ( + "No valid saved tokens found. 
Please run " + "'garmy-sync sync' from the command line first to authenticate, " + "then try again." + ), + "details": str(e), + } + + # Execute sync + stats = manager.sync_range( + user_id=user_id, + start_date=start_date, + end_date=end_date, + metrics=sync_metrics, + ) + + return { + "success": True, + "date_range": { + "start": start_date.isoformat(), + "end": end_date.isoformat(), + }, + "statistics": { + "completed": stats["completed"], + "skipped": stats["skipped"], + "failed": stats["failed"], + "total_tasks": stats["total_tasks"], + }, + "metrics_synced": ( + [m.name for m in sync_metrics] + if sync_metrics + else [m.name for m in MetricType] + ), + "messages": progress.messages[-10:], # Last 10 messages + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "messages": progress.messages[-10:], + } + + def _get_table_description(table_name: str) -> str: """Get human-readable description for table.""" descriptions = { From 5f4b120bdc1c63b2f9b4324a3867d36b518faaed Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Wed, 14 Jan 2026 21:18:52 -0800 Subject: [PATCH 14/26] Add workout management module with fuzzy exercise name matching Introduces a complete workout management system for creating, listing, scheduling, and deleting Garmin Connect workouts via the API and MCP server. 
New workouts module (src/garmy/workouts/): - WorkoutBuilder: Fluent API for constructing workouts with steps, repeats, targets (power/HR/cadence), and strength exercise support - WorkoutClient: CRUD operations for Garmin Connect workout-service API - WorkoutSerializer: Bidirectional conversion between models and Garmin JSON - Models: Workout, WorkoutStep, RepeatGroup, EndCondition, Target dataclasses - Constants: SportType, StepType, EndConditionType, TargetType, IntensityType Exercise name fuzzy matching (src/garmy/workouts/exercises.py): - ExerciseMatcher with hybrid token + Levenshtein matching algorithm - 800+ exercises across 30+ categories from Garmin FIT SDK - 150+ common aliases (e.g., "bench press" -> BARBELL_BENCH_PRESS) - Auto-resolution in MCP create_workout tool for user-friendly names MCP server enhancements: - list_workouts, get_workout, create_workout, schedule_workout, delete_workout - search_exercises tool for exercise discovery (no auth required) - --enable-workouts flag and GARMY_ENABLE_WORKOUTS env var --- src/garmy/core/client.py | 26 +- src/garmy/mcp/cli.py | 53 +- src/garmy/mcp/config.py | 7 +- src/garmy/mcp/server.py | 537 +++++++- src/garmy/workouts/__init__.py | 86 ++ src/garmy/workouts/builder.py | 548 ++++++++ src/garmy/workouts/client.py | 329 +++++ src/garmy/workouts/constants.py | 243 ++++ src/garmy/workouts/exercises.py | 2009 +++++++++++++++++++++++++++++ src/garmy/workouts/models.py | 304 +++++ src/garmy/workouts/serializer.py | 442 +++++++ tests/test_mcp_workouts.py | 428 ++++++ tests/test_workouts_builder.py | 337 +++++ tests/test_workouts_client.py | 401 ++++++ tests/test_workouts_constants.py | 166 +++ tests/test_workouts_exercises.py | 990 ++++++++++++++ tests/test_workouts_models.py | 281 ++++ tests/test_workouts_serializer.py | 410 ++++++ 18 files changed, 7586 insertions(+), 11 deletions(-) create mode 100644 src/garmy/workouts/__init__.py create mode 100644 src/garmy/workouts/builder.py create mode 100644 
src/garmy/workouts/client.py create mode 100644 src/garmy/workouts/constants.py create mode 100644 src/garmy/workouts/exercises.py create mode 100644 src/garmy/workouts/models.py create mode 100644 src/garmy/workouts/serializer.py create mode 100644 tests/test_mcp_workouts.py create mode 100644 tests/test_workouts_builder.py create mode 100644 tests/test_workouts_client.py create mode 100644 tests/test_workouts_constants.py create mode 100644 tests/test_workouts_exercises.py create mode 100644 tests/test_workouts_models.py create mode 100644 tests/test_workouts_serializer.py diff --git a/src/garmy/core/client.py b/src/garmy/core/client.py index f9ee827..e8f3700 100644 --- a/src/garmy/core/client.py +++ b/src/garmy/core/client.py @@ -23,6 +23,7 @@ if TYPE_CHECKING: from ..auth.client import AuthClient + from ..workouts.client import WorkoutClient from .registry import MetricRegistry from urllib.parse import urljoin @@ -285,6 +286,27 @@ def metrics(self) -> "MetricRegistry": self._metrics = MetricRegistry(self) return self._metrics + @property + def workouts(self) -> "WorkoutClient": + """Get the workout client for workout operations. + + Provides lazy-loaded access to Garmin Connect workout operations + including creating, updating, deleting, and scheduling workouts. + + Returns: + WorkoutClient instance for workout operations + + Example: + >>> client = APIClient(auth_client=auth) + >>> workouts = client.workouts.list_workouts() + >>> new_workout = client.workouts.create_workout(workout) + """ + if not hasattr(self, "_workouts"): + from ..workouts.client import WorkoutClient + + self._workouts = WorkoutClient(self) + return self._workouts + def get_user_profile(self) -> Dict[str, Any]: """Get user profile information from the API. 
@@ -327,8 +349,8 @@ def request( # Use HTTP client to build URL url = self.http_client.build_url(subdomain, path) - # Get authentication headers if needed - headers = kwargs.get("headers", {}) + # Extract headers from kwargs (pop to avoid passing twice) + headers = kwargs.pop("headers", {}) if api: auth_headers = self.auth_delegate.get_auth_headers() headers.update(auth_headers) diff --git a/src/garmy/mcp/cli.py b/src/garmy/mcp/cli.py index b7642df..896a97c 100644 --- a/src/garmy/mcp/cli.py +++ b/src/garmy/mcp/cli.py @@ -151,7 +151,9 @@ def cmd_server(args): print("Recommendations:", file=sys.stderr) print(" - Use firewall rules to restrict access", file=sys.stderr) print(" - Use SSH tunneling for remote access", file=sys.stderr) - print(" - Consider using 127.0.0.1 for localhost-only", file=sys.stderr) + print( + " - Consider using 127.0.0.1 for localhost-only", file=sys.stderr + ) print("=" * 60, file=sys.stderr) print("", file=sys.stderr) @@ -174,6 +176,7 @@ def cmd_server(args): port=args.port, enable_sync=args.enable_sync, profile_path=profile_path, + enable_workouts=args.enable_workouts, ) if args.verbose: @@ -218,6 +221,20 @@ def cmd_server(args): else: print(f" - Sync enabled: no (use --enable-sync to enable)") + if config.enable_workouts: + tools_list.extend( + [ + "list_workouts", + "get_workout", + "create_workout", + "schedule_workout", + "delete_workout", + ] + ) + print(" - Workouts enabled: yes") + else: + print(" - Workouts enabled: no (use --enable-workouts to enable)") + print(f"Available tools: {', '.join(tools_list)}") # Create and run server with explicit config @@ -297,10 +314,19 @@ def cmd_info(args): print(f"\\nWarning: Could not analyze database structure: {e}") print("\\nMCP Server Tools:") + print(" Core tools (always available):") print(" - explore_database_structure() - Discover available data") print(" - get_table_details(name) - Get table schema and samples") print(" - execute_sql_query(sql, params) - Run SQL queries safely") print(" 
- get_health_summary(user_id, days) - Quick health overview") + print(" Sync tool (requires --enable-sync):") + print(" - sync_health_data(last_days, metrics) - Fetch fresh data from Garmin") + print(" Workout tools (requires --enable-workouts):") + print(" - list_workouts(limit) - List workouts from Garmin Connect") + print(" - get_workout(workout_id) - Get workout details") + print(" - create_workout(name, sport_type, steps_json) - Create a workout") + print(" - schedule_workout(workout_id, date) - Schedule workout") + print(" - delete_workout(workout_id) - Delete a workout") print("\\nTo start MCP server:") print(f" garmy-mcp server --database {db_path}") @@ -376,12 +402,28 @@ def cmd_config(args): print(" --enable-sync: Enable the sync_health_data tool") print(" --profile-path: Path to profile directory (contains tokens and database)") print("") - print(" The sync tool allows AI assistants to fetch fresh data from Garmin Connect.") + print( + " The sync tool allows AI assistants to fetch fresh data from Garmin Connect." + ) print(" Requires valid saved authentication tokens - run 'garmy-sync sync' first.") print("") print(" Example with sync enabled:") print(" garmy-mcp server --profile-path ~/profiles/user1 --enable-sync") + print("\n🏋️ Workout Settings:") + print(" --enable-workouts: Enable workout management tools") + print("") + print(" Workout tools allow AI assistants to create, list, schedule, and delete") + print(" workouts in Garmin Connect. 
Requires valid saved authentication tokens.") + print("") + print(" Example with workouts enabled:") + print(" garmy-mcp server --profile-path ~/profiles/user1 --enable-workouts") + print("") + print(" Example with both sync and workouts:") + print( + " garmy-mcp server --profile-path ~/profiles/user1 --enable-sync --enable-workouts" + ) + def create_parser(): """Create argument parser with subcommands.""" @@ -505,6 +547,13 @@ def create_parser(): "Requires valid saved authentication tokens (run 'garmy-sync sync' first to authenticate).", ) + server_parser.add_argument( + "--enable-workouts", + action="store_true", + help="Enable workout management tools (list, create, schedule, delete workouts). " + "Requires valid saved authentication tokens (run 'garmy-sync sync' first to authenticate).", + ) + server_parser.set_defaults(func=cmd_server) # Info command diff --git a/src/garmy/mcp/config.py b/src/garmy/mcp/config.py index f9164a2..2690665 100644 --- a/src/garmy/mcp/config.py +++ b/src/garmy/mcp/config.py @@ -29,6 +29,9 @@ class MCPConfig: enable_sync: bool = False profile_path: Optional[Path] = None + # Workout settings (requires authentication) + enable_workouts: bool = False + @classmethod def from_db_path(cls, db_path: Path, **kwargs) -> "MCPConfig": """Create config with database path and optional overrides.""" @@ -65,6 +68,4 @@ def validate(self) -> None: if self.transport in ("http", "sse"): if self.port < 1 or self.port > 65535: - raise ValueError( - f"Port must be between 1 and 65535, got {self.port}" - ) + raise ValueError(f"Port must be between 1 and 65535, got {self.port}") diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 4939059..17e3c60 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -414,6 +414,10 @@ def health_data_guide() -> str: if config.enable_sync: _register_sync_tool(mcp, config) + # Only add workout tools if workouts are enabled + if config.enable_workouts: + _register_workout_tools(mcp, config) + 
return mcp @@ -501,9 +505,7 @@ def sync_health_data( sync_metrics.append(MetricType[name]) except KeyError: available = ", ".join([m.name for m in MetricType]) - raise ValueError( - f"Invalid metric: {name}. Available: {available}" - ) + raise ValueError(f"Invalid metric: {name}. Available: {available}") # Calculate date range end_date = date.today() @@ -573,6 +575,533 @@ def sync_health_data( } +def _register_workout_tools(mcp: FastMCP, config: MCPConfig) -> None: + """Register workout management tools with the MCP server. + + Args: + mcp: The FastMCP server instance + config: MCP configuration with workout settings + """ + from ..auth.client import AuthClient + from ..core.client import APIClient + from ..workouts import SportType, WorkoutBuilder + from ..workouts.exercises import ( + resolve_exercise, + search_exercises as search_exercises_func, + ) + + def _get_authenticated_client() -> APIClient: + """Get an authenticated API client using saved tokens.""" + auth_client = AuthClient(token_dir=config.token_dir) + if not auth_client.is_authenticated: + raise ValueError( + "Authentication required. Please run 'garmy-sync sync' from the " + "command line first to authenticate, then try again." + ) + return APIClient(auth_client=auth_client) + + @mcp.tool() + def list_workouts( + limit: int = 20, + my_workouts_only: bool = True, + ) -> Dict[str, Any]: + """WHEN TO USE: When you need to see existing workouts in Garmin Connect. + + Lists workouts from the user's Garmin Connect account. Use this to see + what workouts are available before modifying or scheduling them. + + IMPORTANT: Requires valid saved authentication tokens. 
+ + Args: + limit: Maximum number of workouts to return (default: 20, max: 100) + my_workouts_only: If True, only return user's own workouts (default: True) + + Returns: + List of workouts with their IDs, names, sport types, and step counts + """ + if limit < 1 or limit > 100: + raise ValueError("limit must be between 1 and 100") + + try: + api = _get_authenticated_client() + workouts = api.workouts.list_workouts( + limit=limit, my_workouts_only=my_workouts_only + ) + + return { + "success": True, + "count": len(workouts), + "workouts": [ + { + "workout_id": w.workout_id, + "name": w.name, + "sport_type": w.sport_type.key, + "description": w.description, + "step_count": len(w.steps), + } + for w in workouts + ], + } + except Exception as e: + return {"success": False, "error": str(e)} + + @mcp.tool() + def get_workout(workout_id: int) -> Dict[str, Any]: + """WHEN TO USE: When you need details about a specific workout. + + Gets the full details of a workout including all steps, targets, and durations. + + IMPORTANT: Requires valid saved authentication tokens. 
+ + Args: + workout_id: The Garmin workout ID to retrieve + + Returns: + Full workout details including steps and structure + """ + if workout_id < 1: + raise ValueError("workout_id must be positive") + + try: + api = _get_authenticated_client() + workout = api.workouts.get_workout(workout_id) + + if workout is None: + return {"success": False, "error": f"Workout {workout_id} not found"} + + # Format steps for readability + steps_info = [] + for i, step in enumerate(workout.steps): + from ..workouts.models import RepeatGroup + + if isinstance(step, RepeatGroup): + steps_info.append( + { + "index": i + 1, + "type": "repeat", + "iterations": step.iterations, + "steps": [ + { + "type": s.step_type.value, + "duration_seconds": s.end_condition.value, + "end_condition": s.end_condition.condition_type.value, + "target_type": s.target.target_type.value, + "exercise_name": s.exercise_name, + "exercise_category": s.exercise_category, + "weight_value": s.weight_value, + "weight_unit": s.weight_unit, + "description": s.description, + } + for s in step.steps + ], + } + ) + else: + steps_info.append( + { + "index": i + 1, + "type": step.step_type.value, + "duration_seconds": step.end_condition.value, + "end_condition": step.end_condition.condition_type.value, + "target_type": step.target.target_type.value, + "target_low": step.target.value_low, + "target_high": step.target.value_high, + "exercise_name": step.exercise_name, + "exercise_category": step.exercise_category, + "weight_value": step.weight_value, + "weight_unit": step.weight_unit, + "description": step.description, + } + ) + + return { + "success": True, + "workout": { + "workout_id": workout.workout_id, + "name": workout.name, + "sport_type": workout.sport_type.key, + "description": workout.description, + "steps": steps_info, + }, + } + except Exception as e: + return {"success": False, "error": str(e)} + + @mcp.tool() + def create_workout( + name: str, + sport_type: str = "cycling", + description: Optional[str] = 
None, + steps_json: Optional[str] = None, + ) -> Dict[str, Any]: + """WHEN TO USE: When you need to create a new workout in Garmin Connect. + + Creates a structured workout that can be synced to Garmin devices. + + IMPORTANT: Requires valid saved authentication tokens. + + Args: + name: Name for the workout + sport_type: Sport type (cycling, running, swimming, strength_training, etc.) + description: Optional description + steps_json: JSON string defining workout steps. Format: + [{"type": "warmup|interval|recovery|cooldown|rest", + "seconds": 60, "minutes": 10, "duration_seconds": 60, + "target_power": [88, 93], "description": "..."}] + For repeats: {"type": "repeat", "iterations": 3, "steps": [...]} + Duration can be specified as "minutes", "seconds", or "duration_seconds" + + Example steps_json: + '[{"type": "warmup", "seconds": 300}, + {"type": "repeat", "iterations": 3, "steps": [ + {"type": "interval", "duration_seconds": 30, "target_power": [90, 95]}, + {"type": "rest", "duration_seconds": 60} + ]}, + {"type": "cooldown", "minutes": 5}]' + + Returns: + Created workout details including the new workout_id + """ + import json + + # Validate sport type + try: + sport = SportType.from_key(sport_type.lower()) + except (ValueError, AttributeError) as e: + available = [s.key for s in SportType] + raise ValueError( + f"Invalid sport_type: {sport_type}. 
Available: {', '.join(available)}" + ) from e + + try: + api = _get_authenticated_client() + builder = WorkoutBuilder(name, sport) + + if description: + builder.with_description(description) + + # Parse and add steps if provided + if steps_json: + steps = json.loads(steps_json) + _add_steps_from_json(builder, steps) + + workout = builder.build() + result = api.workouts.create_workout(workout) + + return { + "success": True, + "message": f"Workout '{name}' created successfully", + "workout": { + "workout_id": result.workout_id, + "name": result.name, + # Use the sport type we requested, not from response + # (Garmin returns sportTypeKey=null when we only send sportTypeKey) + "sport_type": sport.key, + }, + } + except json.JSONDecodeError as e: + return {"success": False, "error": f"Invalid steps_json format: {e}"} + except Exception as e: + return {"success": False, "error": str(e)} + + @mcp.tool() + def schedule_workout( + workout_id: int, + date: str, + ) -> Dict[str, Any]: + """WHEN TO USE: When you need to schedule a workout for a specific date. + + Schedules an existing workout to appear on the user's Garmin calendar + for the specified date. The workout will sync to connected devices. + + IMPORTANT: Requires valid saved authentication tokens. 
+ + Args: + workout_id: The Garmin workout ID to schedule + date: Date to schedule in YYYY-MM-DD format (e.g., "2024-01-15") + + Returns: + Success status and confirmation message + """ + import re + + if workout_id < 1: + raise ValueError("workout_id must be positive") + + if not re.match(r"^\d{4}-\d{2}-\d{2}$", date): + raise ValueError("date must be in YYYY-MM-DD format") + + try: + api = _get_authenticated_client() + api.workouts.schedule_workout(workout_id, date) + + return { + "success": True, + "message": f"Workout {workout_id} scheduled for {date}", + "workout_id": workout_id, + "scheduled_date": date, + } + except Exception as e: + return {"success": False, "error": str(e)} + + @mcp.tool() + def delete_workout(workout_id: int) -> Dict[str, Any]: + """WHEN TO USE: When you need to delete a workout from Garmin Connect. + + Permanently deletes a workout. This cannot be undone. + + IMPORTANT: Requires valid saved authentication tokens. + + Args: + workout_id: The Garmin workout ID to delete + + Returns: + Success status and confirmation message + """ + if workout_id < 1: + raise ValueError("workout_id must be positive") + + try: + api = _get_authenticated_client() + api.workouts.delete_workout(workout_id) + + return { + "success": True, + "message": f"Workout {workout_id} deleted successfully", + "workout_id": workout_id, + } + except Exception as e: + return {"success": False, "error": str(e)} + + @mcp.tool() + def search_exercises( + query: str, + limit: int = 10, + category: Optional[str] = None, + ) -> Dict[str, Any]: + """WHEN TO USE: When you need to find valid Garmin exercise names for strength workouts. + + Searches the exercise database to find matching exercises. Use this to discover + the correct exercise name before creating a strength training workout. + + This tool does NOT require authentication - it's a local lookup. 
+ + Args: + query: Search term (e.g., "bench press", "curl", "squat") + limit: Maximum results to return (default: 10, max: 50) + category: Optional category filter (e.g., "BENCH_PRESS", "CURL", "SQUAT") + + Returns: + List of matching exercises with their Garmin names, categories, and match scores + """ + if limit < 1 or limit > 50: + raise ValueError("limit must be between 1 and 50") + + try: + results = search_exercises_func(query, limit=limit) + + # Filter by category if provided + if category: + category_upper = category.upper() + results = [r for r in results if r.category == category_upper] + + return { + "success": True, + "query": query, + "count": len(results), + "exercises": [ + { + "name": r.name, + "category": r.category, + "score": round(r.score, 3), + } + for r in results + ], + "usage_tip": "Use the 'name' field as exercise_name in create_workout steps_json", + } + except Exception as e: + return {"success": False, "error": str(e)} + + def _is_garmin_format(name: str) -> bool: + """Check if name is already in SCREAMING_SNAKE_CASE Garmin format.""" + return name == name.upper() and "_" in name + + def _add_steps_from_json(builder: WorkoutBuilder, steps: list) -> None: + """Add steps to builder from JSON structure.""" + for step in steps: + step_type = step.get("type", "interval").lower() + minutes = step.get("minutes") + # Accept both "seconds" and "duration_seconds" for consistency with get_workout output + seconds = step.get("seconds") or step.get("duration_seconds") + distance_km = step.get("distance_km") + target_power = step.get("target_power") + target_hr = step.get("target_hr") + target_cadence = step.get("target_cadence") + description = step.get("description") + lap_button = step.get("lap_button", False) + + # Extract exercise fields for strength training + reps = step.get("reps") + exercise_name = step.get("exercise_name") + exercise_category = step.get("exercise_category") + weight_value = step.get("weight_value") + weight_unit = 
step.get("weight_unit") + + # Auto-resolve exercise name if not already in Garmin format + if exercise_name and not _is_garmin_format(exercise_name): + try: + resolved_name, resolved_category = resolve_exercise(exercise_name) + exercise_name = resolved_name + # Only override category if not explicitly provided + if not exercise_category: + exercise_category = resolved_category + except ValueError: + # If resolution fails, pass through the original name + # Let Garmin API handle the error + pass + + # Convert target lists to tuples + if target_power and isinstance(target_power, list): + target_power = tuple(target_power) + if target_hr and isinstance(target_hr, list): + target_hr = tuple(target_hr) + if target_cadence and isinstance(target_cadence, list): + target_cadence = tuple(target_cadence) + + if step_type == "repeat": + iterations = step.get("iterations", 1) + repeat_steps = step.get("steps", []) + repeat_builder = builder.repeat(iterations) + + for rs in repeat_steps: + rs_type = rs.get("type", "interval").lower() + rs_minutes = rs.get("minutes") + # Accept both "seconds" and "duration_seconds" + rs_seconds = rs.get("seconds") or rs.get("duration_seconds") + rs_target_power = rs.get("target_power") + rs_target_hr = rs.get("target_hr") + rs_desc = rs.get("description") + + # Extract exercise fields for nested steps + rs_reps = rs.get("reps") + rs_exercise_name = rs.get("exercise_name") + rs_exercise_category = rs.get("exercise_category") + rs_weight_value = rs.get("weight_value") + rs_weight_unit = rs.get("weight_unit") + + # Auto-resolve exercise name if not already in Garmin format + if rs_exercise_name and not _is_garmin_format(rs_exercise_name): + try: + resolved_name, resolved_category = resolve_exercise( + rs_exercise_name + ) + rs_exercise_name = resolved_name + # Only override category if not explicitly provided + if not rs_exercise_category: + rs_exercise_category = resolved_category + except ValueError: + # If resolution fails, pass through the 
original name + # Let Garmin API handle the error + pass + + if rs_target_power and isinstance(rs_target_power, list): + rs_target_power = tuple(rs_target_power) + if rs_target_hr and isinstance(rs_target_hr, list): + rs_target_hr = tuple(rs_target_hr) + + if rs_type == "interval": + repeat_builder.interval( + minutes=rs_minutes, + seconds=rs_seconds, + target_power=rs_target_power, + target_hr=rs_target_hr, + description=rs_desc, + reps=rs_reps, + exercise_name=rs_exercise_name, + exercise_category=rs_exercise_category, + weight_value=rs_weight_value, + weight_unit=rs_weight_unit, + ) + elif rs_type == "recovery": + repeat_builder.recovery( + minutes=rs_minutes, + seconds=rs_seconds, + target_power=rs_target_power, + target_hr=rs_target_hr, + description=rs_desc, + reps=rs_reps, + exercise_name=rs_exercise_name, + exercise_category=rs_exercise_category, + weight_value=rs_weight_value, + weight_unit=rs_weight_unit, + ) + elif rs_type == "rest": + repeat_builder.rest( + minutes=rs_minutes, seconds=rs_seconds, description=rs_desc + ) + + repeat_builder.end_repeat() + + elif step_type == "warmup": + builder.warmup( + minutes=minutes, + seconds=seconds, + distance_km=distance_km, + target_power=target_power, + target_hr=target_hr, + lap_button=lap_button, + description=description, + ) + elif step_type == "cooldown": + builder.cooldown( + minutes=minutes, + seconds=seconds, + distance_km=distance_km, + target_power=target_power, + target_hr=target_hr, + lap_button=lap_button, + description=description, + ) + elif step_type == "interval": + builder.interval( + minutes=minutes, + seconds=seconds, + distance_km=distance_km, + target_power=target_power, + target_hr=target_hr, + target_cadence=target_cadence, + lap_button=lap_button, + description=description, + reps=reps, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + elif step_type == "recovery": + builder.recovery( + minutes=minutes, + 
seconds=seconds, + distance_km=distance_km, + target_power=target_power, + target_hr=target_hr, + lap_button=lap_button, + description=description, + reps=reps, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + elif step_type == "rest": + builder.rest( + minutes=minutes, + seconds=seconds, + lap_button=lap_button, + description=description, + ) + + def _get_table_description(table_name: str) -> str: """Get human-readable description for table.""" descriptions = { @@ -599,7 +1128,7 @@ def _get_health_data_guide() -> str: ### daily_health_metrics **WHAT**: Daily summaries of all health metrics **CONTAINS**: steps, sleep hours, heart rate averages, stress levels, body battery -**COMMON QUERIES**: +**COMMON QUERIES**: - Recent trends: `SELECT metric_date, total_steps, sleep_duration_hours FROM daily_health_metrics WHERE user_id = 1 ORDER BY metric_date DESC LIMIT 30` - Sleep analysis: `SELECT metric_date, sleep_duration_hours, deep_sleep_hours FROM daily_health_metrics WHERE sleep_duration_hours IS NOT NULL` diff --git a/src/garmy/workouts/__init__.py b/src/garmy/workouts/__init__.py new file mode 100644 index 0000000..ed43898 --- /dev/null +++ b/src/garmy/workouts/__init__.py @@ -0,0 +1,86 @@ +""" +Garmy Workouts Module. + +This module provides functionality for creating, managing, and scheduling +workouts in Garmin Connect. + +Example: + >>> from garmy import AuthClient, APIClient + >>> from garmy.workouts import WorkoutBuilder, SportType + >>> + >>> # Authenticate + >>> auth = AuthClient() + >>> api = APIClient(auth_client=auth) + >>> auth.login("email", "password") + >>> + >>> # Create a workout using the fluent builder + >>> workout = ( + ... WorkoutBuilder("Sweet Spot 2x20", SportType.CYCLING) + ... .with_description("Endurance builder workout") + ... .warmup(minutes=15, target_power=(50, 65)) + ... .repeat(2) + ... .interval(minutes=20, target_power=(88, 93)) + ... 
.recovery(minutes=5, target_power=(40, 50)) + ... .end_repeat() + ... .cooldown(minutes=10, target_power=(40, 55)) + ... .build() + ... ) + >>> + >>> # Create the workout in Garmin Connect + >>> result = api.workouts.create_workout(workout) + >>> print(f"Created workout: {result.workout_id}") + >>> + >>> # List existing workouts + >>> workouts = api.workouts.list_workouts(limit=10) + >>> for w in workouts: + ... print(f"{w.name} ({w.sport_type.key})") +""" + +from .builder import RepeatBuilder, WorkoutBuilder +from .client import WorkoutClient +from .constants import ( + EndConditionType, + IntensityType, + SportType, + StepType, + TargetType, +) +from .exercises import ( + ExerciseMatcher, + MatchResult, + get_matcher, + resolve_exercise, + search_exercises, +) +from .models import ( + EndCondition, + RepeatGroup, + Target, + Workout, + WorkoutSegment, + WorkoutStep, +) +from .serializer import WorkoutSerializer + +__all__ = [ + "EndCondition", + "EndConditionType", + "ExerciseMatcher", + "IntensityType", + "MatchResult", + "RepeatBuilder", + "RepeatGroup", + "SportType", + "StepType", + "Target", + "TargetType", + "Workout", + "WorkoutBuilder", + "WorkoutClient", + "WorkoutSegment", + "WorkoutSerializer", + "WorkoutStep", + "get_matcher", + "resolve_exercise", + "search_exercises", +] diff --git a/src/garmy/workouts/builder.py b/src/garmy/workouts/builder.py new file mode 100644 index 0000000..574f50d --- /dev/null +++ b/src/garmy/workouts/builder.py @@ -0,0 +1,548 @@ +""" +Fluent builder API for creating Garmin workouts. + +This module provides an intuitive, chainable API for constructing workouts +without needing to manually create all the underlying model objects. + +Example: + >>> workout = ( + ... WorkoutBuilder("Sweet Spot 2x20", SportType.CYCLING) + ... .with_description("Endurance builder") + ... .warmup(minutes=15, target_power=(50, 65)) + ... .repeat(2) + ... .interval(minutes=20, target_power=(88, 93)) + ... .recovery(minutes=5) + ... 
.end_repeat() + ... .cooldown(minutes=10) + ... .build() + ... ) +""" + +from typing import List, Optional, Tuple + +from .constants import SportType, StepType +from .models import ( + EndCondition, + RepeatGroup, + Target, + Workout, + WorkoutStep, + WorkoutStepOrRepeat, +) + + +class RepeatBuilder: + """Builder for repeat group steps. + + Created via WorkoutBuilder.repeat() and returns to parent via end_repeat(). + """ + + def __init__(self, parent: "WorkoutBuilder", iterations: int) -> None: + """Initialize repeat builder. + + Args: + parent: The parent WorkoutBuilder to return to. + iterations: Number of times to repeat the steps. + """ + self._parent = parent + self._iterations = iterations + self._steps: List[WorkoutStep] = [] + + def _create_step( + self, + step_type: StepType, + minutes: Optional[float] = None, + seconds: Optional[float] = None, + distance_km: Optional[float] = None, + distance_miles: Optional[float] = None, + target_power: Optional[Tuple[float, float]] = None, + target_hr: Optional[Tuple[float, float]] = None, + target_cadence: Optional[Tuple[int, int]] = None, + description: Optional[str] = None, + lap_button: bool = False, + reps: Optional[int] = None, + exercise_name: Optional[str] = None, + exercise_category: Optional[str] = None, + weight_value: Optional[float] = None, + weight_unit: Optional[str] = None, + ) -> WorkoutStep: + """Create a workout step with the given parameters.""" + # Determine end condition + if lap_button: + end_condition = EndCondition.lap_button() + elif reps is not None: + end_condition = EndCondition.reps(reps) + elif minutes is not None: + end_condition = EndCondition.time_minutes(minutes) + elif seconds is not None: + end_condition = EndCondition.time(seconds) + elif distance_km is not None: + end_condition = EndCondition.distance_km(distance_km) + elif distance_miles is not None: + end_condition = EndCondition.distance_miles(distance_miles) + else: + end_condition = EndCondition.lap_button() + + # Determine 
target + if target_power is not None: + target = Target.power_zone(target_power[0], target_power[1]) + elif target_hr is not None: + target = Target.heart_rate_zone(target_hr[0], target_hr[1]) + elif target_cadence is not None: + target = Target.cadence_zone(target_cadence[0], target_cadence[1]) + else: + target = Target.no_target() + + return WorkoutStep( + step_type=step_type, + end_condition=end_condition, + target=target, + description=description, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + + def interval( + self, + minutes: Optional[float] = None, + seconds: Optional[float] = None, + distance_km: Optional[float] = None, + distance_miles: Optional[float] = None, + target_power: Optional[Tuple[float, float]] = None, + target_hr: Optional[Tuple[float, float]] = None, + target_cadence: Optional[Tuple[int, int]] = None, + description: Optional[str] = None, + lap_button: bool = False, + reps: Optional[int] = None, + exercise_name: Optional[str] = None, + exercise_category: Optional[str] = None, + weight_value: Optional[float] = None, + weight_unit: Optional[str] = None, + ) -> "RepeatBuilder": + """Add an interval step to the repeat group.""" + step = self._create_step( + StepType.INTERVAL, + minutes=minutes, + seconds=seconds, + distance_km=distance_km, + distance_miles=distance_miles, + target_power=target_power, + target_hr=target_hr, + target_cadence=target_cadence, + description=description, + lap_button=lap_button, + reps=reps, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + self._steps.append(step) + return self + + def recovery( + self, + minutes: Optional[float] = None, + seconds: Optional[float] = None, + distance_km: Optional[float] = None, + distance_miles: Optional[float] = None, + target_power: Optional[Tuple[float, float]] = None, + target_hr: Optional[Tuple[float, float]] = None, 
+ target_cadence: Optional[Tuple[int, int]] = None, + description: Optional[str] = None, + lap_button: bool = False, + reps: Optional[int] = None, + exercise_name: Optional[str] = None, + exercise_category: Optional[str] = None, + weight_value: Optional[float] = None, + weight_unit: Optional[str] = None, + ) -> "RepeatBuilder": + """Add a recovery step to the repeat group.""" + step = self._create_step( + StepType.RECOVERY, + minutes=minutes, + seconds=seconds, + distance_km=distance_km, + distance_miles=distance_miles, + target_power=target_power, + target_hr=target_hr, + target_cadence=target_cadence, + description=description, + lap_button=lap_button, + reps=reps, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + self._steps.append(step) + return self + + def rest( + self, + minutes: Optional[float] = None, + seconds: Optional[float] = None, + description: Optional[str] = None, + lap_button: bool = False, + ) -> "RepeatBuilder": + """Add a rest step to the repeat group.""" + step = self._create_step( + StepType.REST, + minutes=minutes, + seconds=seconds, + description=description, + lap_button=lap_button, + ) + self._steps.append(step) + return self + + def step( + self, + step_type: StepType, + minutes: Optional[float] = None, + seconds: Optional[float] = None, + distance_km: Optional[float] = None, + distance_miles: Optional[float] = None, + target_power: Optional[Tuple[float, float]] = None, + target_hr: Optional[Tuple[float, float]] = None, + target_cadence: Optional[Tuple[int, int]] = None, + description: Optional[str] = None, + lap_button: bool = False, + reps: Optional[int] = None, + exercise_name: Optional[str] = None, + exercise_category: Optional[str] = None, + weight_value: Optional[float] = None, + weight_unit: Optional[str] = None, + ) -> "RepeatBuilder": + """Add a generic step to the repeat group.""" + step = self._create_step( + step_type, + minutes=minutes, + 
class WorkoutBuilder:
    """Fluent builder for creating Garmin workouts.

    Every step-adding method returns the builder itself so calls chain;
    ``build()`` produces the final Workout.

    Example:
        >>> workout = (
        ...     WorkoutBuilder("My Workout", SportType.CYCLING)
        ...     .warmup(minutes=10)
        ...     .interval(minutes=5, target_power=(90, 95))
        ...     .cooldown(minutes=5)
        ...     .build()
        ... )
    """

    def __init__(
        self,
        name: str,
        sport_type: SportType = SportType.CYCLING,
    ) -> None:
        """Initialize the workout builder.

        Args:
            name: Name of the workout.
            sport_type: Type of sport for the workout.
        """
        self._name = name
        self._sport_type = sport_type
        self._description: Optional[str] = None
        self._steps: List[WorkoutStepOrRepeat] = []

    def with_description(self, description: str) -> "WorkoutBuilder":
        """Set the workout description and return the builder."""
        self._description = description
        return self

    def _create_step(
        self,
        step_type: StepType,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
        reps: Optional[int] = None,
        exercise_name: Optional[str] = None,
        exercise_category: Optional[str] = None,
        weight_value: Optional[float] = None,
        weight_unit: Optional[str] = None,
    ) -> WorkoutStep:
        """Assemble a single WorkoutStep from keyword parameters.

        End-condition precedence: lap_button, reps, minutes, seconds,
        distance_km, distance_miles; when none is given the lap button
        is used. Target precedence: power, heart rate, cadence, none.
        """
        if lap_button:
            end_condition = EndCondition.lap_button()
        else:
            # First non-None duration/distance parameter wins.
            for value, factory in (
                (reps, EndCondition.reps),
                (minutes, EndCondition.time_minutes),
                (seconds, EndCondition.time),
                (distance_km, EndCondition.distance_km),
                (distance_miles, EndCondition.distance_miles),
            ):
                if value is not None:
                    end_condition = factory(value)
                    break
            else:
                end_condition = EndCondition.lap_button()

        if target_power is not None:
            target = Target.power_zone(target_power[0], target_power[1])
        elif target_hr is not None:
            target = Target.heart_rate_zone(target_hr[0], target_hr[1])
        elif target_cadence is not None:
            target = Target.cadence_zone(target_cadence[0], target_cadence[1])
        else:
            target = Target.no_target()

        return WorkoutStep(
            step_type=step_type,
            end_condition=end_condition,
            target=target,
            description=description,
            exercise_name=exercise_name,
            exercise_category=exercise_category,
            weight_value=weight_value,
            weight_unit=weight_unit,
        )

    def warmup(
        self,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
    ) -> "WorkoutBuilder":
        """Add a warmup step."""
        return self.step(
            StepType.WARMUP,
            minutes=minutes,
            seconds=seconds,
            distance_km=distance_km,
            distance_miles=distance_miles,
            target_power=target_power,
            target_hr=target_hr,
            target_cadence=target_cadence,
            description=description,
            lap_button=lap_button,
        )

    def cooldown(
        self,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
    ) -> "WorkoutBuilder":
        """Add a cooldown step."""
        return self.step(
            StepType.COOLDOWN,
            minutes=minutes,
            seconds=seconds,
            distance_km=distance_km,
            distance_miles=distance_miles,
            target_power=target_power,
            target_hr=target_hr,
            target_cadence=target_cadence,
            description=description,
            lap_button=lap_button,
        )

    def interval(
        self,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
        reps: Optional[int] = None,
        exercise_name: Optional[str] = None,
        exercise_category: Optional[str] = None,
        weight_value: Optional[float] = None,
        weight_unit: Optional[str] = None,
    ) -> "WorkoutBuilder":
        """Add an interval step."""
        return self.step(
            StepType.INTERVAL,
            minutes=minutes,
            seconds=seconds,
            distance_km=distance_km,
            distance_miles=distance_miles,
            target_power=target_power,
            target_hr=target_hr,
            target_cadence=target_cadence,
            description=description,
            lap_button=lap_button,
            reps=reps,
            exercise_name=exercise_name,
            exercise_category=exercise_category,
            weight_value=weight_value,
            weight_unit=weight_unit,
        )

    def recovery(
        self,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
        reps: Optional[int] = None,
        exercise_name: Optional[str] = None,
        exercise_category: Optional[str] = None,
        weight_value: Optional[float] = None,
        weight_unit: Optional[str] = None,
    ) -> "WorkoutBuilder":
        """Add a recovery step."""
        return self.step(
            StepType.RECOVERY,
            minutes=minutes,
            seconds=seconds,
            distance_km=distance_km,
            distance_miles=distance_miles,
            target_power=target_power,
            target_hr=target_hr,
            target_cadence=target_cadence,
            description=description,
            lap_button=lap_button,
            reps=reps,
            exercise_name=exercise_name,
            exercise_category=exercise_category,
            weight_value=weight_value,
            weight_unit=weight_unit,
        )

    def rest(
        self,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
    ) -> "WorkoutBuilder":
        """Add a rest step."""
        return self.step(
            StepType.REST,
            minutes=minutes,
            seconds=seconds,
            description=description,
            lap_button=lap_button,
        )

    def step(
        self,
        step_type: StepType,
        minutes: Optional[float] = None,
        seconds: Optional[float] = None,
        distance_km: Optional[float] = None,
        distance_miles: Optional[float] = None,
        target_power: Optional[Tuple[float, float]] = None,
        target_hr: Optional[Tuple[float, float]] = None,
        target_cadence: Optional[Tuple[int, int]] = None,
        description: Optional[str] = None,
        lap_button: bool = False,
        reps: Optional[int] = None,
        exercise_name: Optional[str] = None,
        exercise_category: Optional[str] = None,
        weight_value: Optional[float] = None,
        weight_unit: Optional[str] = None,
    ) -> "WorkoutBuilder":
        """Add a generic step with the specified type.

        All the named adders (warmup, cooldown, interval, recovery, rest)
        funnel through this method.
        """
        new_step = self._create_step(
            step_type,
            minutes=minutes,
            seconds=seconds,
            distance_km=distance_km,
            distance_miles=distance_miles,
            target_power=target_power,
            target_hr=target_hr,
            target_cadence=target_cadence,
            description=description,
            lap_button=lap_button,
            reps=reps,
            exercise_name=exercise_name,
            exercise_category=exercise_category,
            weight_value=weight_value,
            weight_unit=weight_unit,
        )
        self._steps.append(new_step)
        return self

    def repeat(self, iterations: int) -> RepeatBuilder:
        """Start a repeat group with the specified number of iterations.

        Use end_repeat() on the returned RepeatBuilder to return to this builder.

        Example:
            >>> builder.repeat(3).interval(minutes=5).recovery(minutes=2).end_repeat()
        """
        return RepeatBuilder(self, iterations)

    def add_step(self, step: WorkoutStepOrRepeat) -> "WorkoutBuilder":
        """Add a pre-built step or repeat group."""
        self._steps.append(step)
        return self

    def build(self) -> Workout:
        """Build and return the Workout object."""
        return Workout(
            name=self._name,
            sport_type=self._sport_type,
            description=self._description,
            steps=self._steps,
        )
class WorkoutClient:
    """Client for Garmin Connect workout API operations.

    Provides methods for listing, creating, updating, and deleting workouts
    in Garmin Connect. Also supports scheduling workouts and downloading FIT files.

    Example:
        >>> from garmy import AuthClient, APIClient
        >>> auth = AuthClient()
        >>> api = APIClient(auth_client=auth)
        >>> auth.login("email", "password")
        >>>
        >>> # List workouts
        >>> workouts = api.workouts.list_workouts()
        >>>
        >>> # Create a workout
        >>> from garmy.workouts import WorkoutBuilder, SportType
        >>> workout = WorkoutBuilder("My Workout", SportType.CYCLING).build()
        >>> result = api.workouts.create_workout(workout)
    """

    # Headers required for workout API calls
    WORKOUT_HEADERS: ClassVar[Dict[str, str]] = {
        "Referer": "https://connect.garmin.com/modern/workouts",
        "nk": "NT",
    }

    def __init__(self, api_client: "APIClient") -> None:
        """Initialize the workout client.

        Args:
            api_client: The APIClient instance for making API requests.
        """
        self.api_client = api_client

    def list_workouts(
        self,
        limit: int = 20,
        start: int = 0,
        my_workouts_only: bool = True,
        order_by: str = "WORKOUT_NAME",
        order_seq: str = "ASC",
    ) -> List[Workout]:
        """List workouts from Garmin Connect.

        Args:
            limit: Maximum number of workouts to return. Default 20.
            start: Starting offset for pagination. Default 0.
            my_workouts_only: If True, only return user's own workouts.
            order_by: Field to order by (WORKOUT_NAME, UPDATE_DATE, etc.)
            order_seq: Order sequence (ASC or DESC).

        Returns:
            List of Workout objects (empty when the API returns nothing).
        """
        params = {
            "start": start,
            "limit": limit,
            "myWorkoutsOnly": str(my_workouts_only).lower(),
            "orderBy": order_by,
            "orderSeq": order_seq,
        }

        endpoint = "/workout-service/workouts"
        # Values are simple ints/identifiers, so no URL-encoding is needed.
        query = "&".join(f"{k}={v}" for k, v in params.items())

        response = self.api_client.connectapi(
            f"{endpoint}?{query}",
            headers=self.WORKOUT_HEADERS,
        )

        if response is None:
            return []

        # API returns a list of workout dicts (cast needed as connectapi type hint is incomplete)
        raw_data = cast("List[Dict[str, Any]]", response)
        return [WorkoutSerializer.from_api_format(w) for w in raw_data]

    def get_workout(self, workout_id: Union[int, str]) -> Optional[Workout]:
        """Get a specific workout by ID.

        Args:
            workout_id: The Garmin workout ID.

        Returns:
            Workout object if found, None otherwise.
        """
        endpoint = f"/workout-service/workout/{workout_id}"

        raw_data = self.api_client.connectapi(
            endpoint,
            headers=self.WORKOUT_HEADERS,
        )

        if not raw_data or not isinstance(raw_data, dict):
            return None

        return WorkoutSerializer.from_api_format(raw_data)

    def create_workout(self, workout: Workout) -> Workout:
        """Create a new workout in Garmin Connect.

        Args:
            workout: The Workout object to create.

        Returns:
            The created Workout with workout_id populated, or the original
            workout unchanged when the response is not a dict.

        Raises:
            APIError: If the creation fails.
        """
        endpoint = "/workout-service/workout"
        payload = WorkoutSerializer.to_api_format(workout)

        raw_data = self.api_client.connectapi(
            endpoint,
            method="POST",
            json=payload,
            headers=self.WORKOUT_HEADERS,
        )

        if not raw_data or not isinstance(raw_data, dict):
            # Return original workout if response is unexpected
            return workout

        return WorkoutSerializer.from_api_format(raw_data)

    def create_workout_raw(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Create a workout using raw API format.

        For advanced users who want direct control over the API payload.

        Args:
            data: Dictionary in Garmin API format.

        Returns:
            Raw API response dictionary ({} when the response is not a dict).
        """
        endpoint = "/workout-service/workout"

        result = self.api_client.connectapi(
            endpoint,
            method="POST",
            json=data,
            headers=self.WORKOUT_HEADERS,
        )

        if isinstance(result, dict):
            return result
        return {}

    def update_workout(self, workout: Workout) -> Workout:
        """Update an existing workout in Garmin Connect.

        Args:
            workout: The Workout object with workout_id set.

        Returns:
            The updated Workout object.

        Raises:
            ValueError: If workout_id is not set.
            APIError: If the update fails.
        """
        if not workout.workout_id:
            raise ValueError("workout_id must be set for update")

        endpoint = f"/workout-service/workout/{workout.workout_id}"
        payload = WorkoutSerializer.to_api_format(workout)

        raw_data = self.api_client.connectapi(
            endpoint,
            method="PUT",
            json=payload,
            headers=self.WORKOUT_HEADERS,
        )

        if not raw_data or not isinstance(raw_data, dict):
            return workout

        return WorkoutSerializer.from_api_format(raw_data)

    def delete_workout(self, workout_id: Union[int, str]) -> bool:
        """Delete a workout from Garmin Connect.

        Args:
            workout_id: The Garmin workout ID to delete.

        Returns:
            True if deletion was successful.

        Raises:
            APIError: If the deletion fails.
        """
        endpoint = f"/workout-service/workout/{workout_id}"

        self.api_client.connectapi(
            endpoint,
            method="DELETE",
            headers=self.WORKOUT_HEADERS,
        )

        return True

    def schedule_workout(
        self,
        workout_id: Union[int, str],
        date: str,
    ) -> bool:
        """Schedule a workout for a specific date.

        Args:
            workout_id: The Garmin workout ID to schedule.
            date: Date in YYYY-MM-DD format.

        Returns:
            True if scheduling was successful.

        Raises:
            APIError: If the scheduling fails.
        """
        endpoint = f"/workout-service/schedule/{workout_id}"
        payload = {"date": date}

        self.api_client.connectapi(
            endpoint,
            method="POST",
            json=payload,
            headers=self.WORKOUT_HEADERS,
        )

        return True

    def unschedule_workout(
        self,
        workout_id: Union[int, str],
        date: str,
    ) -> bool:
        """Remove a scheduled workout from a specific date.

        Args:
            workout_id: The Garmin workout ID to unschedule.
            date: Date in YYYY-MM-DD format.

        Returns:
            True if unscheduling was successful.

        Raises:
            APIError: If the unscheduling fails.
        """
        endpoint = f"/workout-service/schedule/{workout_id}"
        payload = {"date": date}

        self.api_client.connectapi(
            endpoint,
            method="DELETE",
            json=payload,
            headers=self.WORKOUT_HEADERS,
        )

        return True

    def download_fit(self, workout_id: Union[int, str]) -> bytes:
        """Download workout as FIT file.

        Args:
            workout_id: The Garmin workout ID.

        Returns:
            FIT file contents as bytes.

        Raises:
            APIError: If the download fails.
        """
        endpoint = f"/workout-service/workout/FIT/{workout_id}"

        response = self.api_client.request(
            "GET",
            "connectapi",
            endpoint,
            api=True,
            headers=self.WORKOUT_HEADERS,
        )

        return response.content

    def duplicate_workout(
        self,
        workout_id: Union[int, str],
        new_name: Optional[str] = None,
    ) -> Workout:
        """Duplicate an existing workout.

        Args:
            workout_id: The workout ID to duplicate.
            new_name: Optional new name for the duplicated workout.

        Returns:
            The newly created duplicate Workout.

        Raises:
            ValueError: If the source workout does not exist.
        """
        original = self.get_workout(workout_id)
        if not original:
            raise ValueError(f"Workout {workout_id} not found")

        # Create a copy without the ID. Copy the step list so that later
        # mutations of the duplicate's steps cannot alias back into the
        # original workout object (steps=original.steps shared the list).
        duplicate = Workout(
            name=new_name or f"{original.name} (Copy)",
            sport_type=original.sport_type,
            description=original.description,
            steps=list(original.steps),
        )

        return self.create_workout(duplicate)
+ """ + + RUNNING = SportTypeValue(1, "running") + CYCLING = SportTypeValue(2, "cycling") + OTHER = SportTypeValue(3, "other") + SWIMMING = SportTypeValue(4, "swimming") + STRENGTH = SportTypeValue(5, "strength_training") + CARDIO = SportTypeValue(6, "cardio_training") + YOGA = SportTypeValue(7, "yoga") + PILATES = SportTypeValue(8, "pilates") + HIIT = SportTypeValue(9, "hiit") + MOBILITY = SportTypeValue(11, "mobility") + WALKING = SportTypeValue(12, "walking") + + @property + def id(self) -> int: + """Get the numeric sport type ID.""" + return self.value.id + + @property + def key(self) -> str: + """Get the string sport type key.""" + return self.value.key + + @classmethod + def from_id(cls, sport_id: int) -> "SportType": + """Get SportType by numeric ID.""" + for sport in cls: + if sport.id == sport_id: + return sport + return cls.OTHER + + @classmethod + def from_key(cls, key: str) -> "SportType": + """Get SportType by string key.""" + key_lower = key.lower() + for sport in cls: + if sport.key == key_lower: + return sport + return cls.OTHER + + +class StepType(Enum): + """Workout step types.""" + + WARMUP = "warmup" + COOLDOWN = "cooldown" + INTERVAL = "interval" + RECOVERY = "recovery" + REST = "rest" + REPEAT = "repeat" + OTHER = "other" + + @property + def type_id(self) -> int: + """Get the numeric step type ID for API.""" + type_ids = { + StepType.WARMUP: 1, + StepType.COOLDOWN: 2, + StepType.INTERVAL: 3, + StepType.RECOVERY: 4, + StepType.REST: 5, + StepType.REPEAT: 6, + StepType.OTHER: 7, + } + return type_ids.get(self, 7) + + @classmethod + def from_type_id(cls, type_id: int) -> "StepType": + """Get StepType from numeric type ID.""" + id_to_type = { + 1: cls.WARMUP, + 2: cls.COOLDOWN, + 3: cls.INTERVAL, + 4: cls.RECOVERY, + 5: cls.REST, + 6: cls.REPEAT, + 7: cls.OTHER, + } + return id_to_type.get(type_id, cls.OTHER) + + +class EndConditionType(Enum): + """How a workout step ends.""" + + LAP_BUTTON = "lap.button" + TIME = "time" + DISTANCE = "distance" + 
CALORIES = "calories" + HEART_RATE_LESS_THAN = "heart.rate.less.than" + HEART_RATE_GREATER_THAN = "heart.rate.greater.than" + POWER_LESS_THAN = "power.less.than" + POWER_GREATER_THAN = "power.greater.than" + ITERATIONS = "iterations" + REPS = "reps" + + @property + def condition_type_id(self) -> int: + """Get the numeric condition type ID for API.""" + type_ids = { + EndConditionType.LAP_BUTTON: 1, + EndConditionType.TIME: 2, + EndConditionType.DISTANCE: 3, + EndConditionType.CALORIES: 4, + EndConditionType.HEART_RATE_LESS_THAN: 5, + EndConditionType.HEART_RATE_GREATER_THAN: 6, + EndConditionType.POWER_LESS_THAN: 11, + EndConditionType.POWER_GREATER_THAN: 12, + EndConditionType.ITERATIONS: 7, + EndConditionType.REPS: 8, + } + return type_ids.get(self, 1) + + @classmethod + def from_condition_type_id(cls, type_id: int) -> "EndConditionType": + """Get EndConditionType from numeric type ID.""" + id_to_type = { + 1: cls.LAP_BUTTON, + 2: cls.TIME, + 3: cls.DISTANCE, + 4: cls.CALORIES, + 5: cls.HEART_RATE_LESS_THAN, + 6: cls.HEART_RATE_GREATER_THAN, + 7: cls.ITERATIONS, + 8: cls.REPS, + 11: cls.POWER_LESS_THAN, + 12: cls.POWER_GREATER_THAN, + } + return id_to_type.get(type_id, cls.LAP_BUTTON) + + +class TargetType(Enum): + """Target metric types for workout steps.""" + + NO_TARGET = "no.target" + POWER_ZONE = "power.zone" + CADENCE_ZONE = "cadence.zone" + HEART_RATE_ZONE = "heart.rate.zone" + SPEED_ZONE = "speed.zone" + PACE_ZONE = "pace.zone" + POWER_LAP = "power.lap" + HEART_RATE_LAP = "heart.rate.lap" + SPEED_LAP = "speed.lap" + + @property + def target_type_id(self) -> int: + """Get the numeric target type ID for API.""" + type_ids = { + TargetType.NO_TARGET: 1, + TargetType.POWER_ZONE: 2, + TargetType.CADENCE_ZONE: 3, + TargetType.HEART_RATE_ZONE: 4, + TargetType.SPEED_ZONE: 5, + TargetType.PACE_ZONE: 6, + TargetType.POWER_LAP: 7, + TargetType.HEART_RATE_LAP: 8, + TargetType.SPEED_LAP: 9, + } + return type_ids.get(self, 1) + + @classmethod + def 
from_target_type_id(cls, type_id: int) -> "TargetType": + """Get TargetType from numeric target type ID.""" + id_to_type = { + 1: cls.NO_TARGET, + 2: cls.POWER_ZONE, + 3: cls.CADENCE_ZONE, + 4: cls.HEART_RATE_ZONE, + 5: cls.SPEED_ZONE, + 6: cls.PACE_ZONE, + 7: cls.POWER_LAP, + 8: cls.HEART_RATE_LAP, + 9: cls.SPEED_LAP, + } + return id_to_type.get(type_id, cls.NO_TARGET) + + +class IntensityType(Enum): + """Intensity levels for workout steps.""" + + ACTIVE = "active" + REST = "rest" + WARMUP = "warmup" + COOLDOWN = "cooldown" + RECOVERY = "recovery" + INTERVAL = "interval" + + @property + def intensity_type_id(self) -> int: + """Get the numeric intensity type ID for API.""" + type_ids = { + IntensityType.ACTIVE: 1, + IntensityType.REST: 2, + IntensityType.WARMUP: 3, + IntensityType.COOLDOWN: 4, + IntensityType.RECOVERY: 5, + IntensityType.INTERVAL: 6, + } + return type_ids.get(self, 1) + + @classmethod + def from_intensity_type_id(cls, type_id: int) -> "IntensityType": + """Get IntensityType from numeric intensity type ID.""" + id_to_type = { + 1: cls.ACTIVE, + 2: cls.REST, + 3: cls.WARMUP, + 4: cls.COOLDOWN, + 5: cls.RECOVERY, + 6: cls.INTERVAL, + } + return id_to_type.get(type_id, cls.ACTIVE) + + +# Unit conversion constants +METERS_PER_KILOMETER = 1000 +METERS_PER_MILE = 1609.344 +SECONDS_PER_MINUTE = 60 +SECONDS_PER_HOUR = 3600 diff --git a/src/garmy/workouts/exercises.py b/src/garmy/workouts/exercises.py new file mode 100644 index 0000000..a1038be --- /dev/null +++ b/src/garmy/workouts/exercises.py @@ -0,0 +1,2009 @@ +""" +Fuzzy exercise name matching for Garmin workouts. + +This module provides intelligent matching from user-friendly exercise names +(e.g., "bench press", "dumbbell curl") to Garmin's required SCREAMING_SNAKE_CASE +format (e.g., BARBELL_BENCH_PRESS, DUMBBELL_BICEPS_CURL). 
@dataclass(frozen=True)
class MatchResult:
    """Result of fuzzy exercise name matching.

    Attributes:
        name: Garmin exercise name in SCREAMING_SNAKE_CASE
        category: Garmin exercise category
        score: Match confidence score (0.0 to 1.0)
        alternatives: Other potential matches with their scores
    """

    name: str
    category: str
    score: float
    alternatives: List[Tuple[str, str, float]] = field(default_factory=list)

    @property
    def is_exact(self) -> bool:
        """True when the match scored a perfect 1.0 (exact name hit)."""
        return self.score >= 1.0

    @property
    def is_confident(self) -> bool:
        """True when the score clears the 0.8 bar for use without warning."""
        return self.score >= 0.8

    def __str__(self) -> str:
        # Render the score as a whole-number percentage, e.g. "BENCH_PRESS (92%)".
        return "{} ({:.0%})".format(self.name, self.score)
"CLOSE_GRIP_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "DECLINE_DUMBBELL_BENCH_PRESS": "BENCH_PRESS", + "DUMBBELL_BENCH_PRESS": "BENCH_PRESS", + "DUMBBELL_FLOOR_PRESS": "BENCH_PRESS", + "INCLINE_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "INCLINE_DUMBBELL_BENCH_PRESS": "BENCH_PRESS", + "INCLINE_SMITH_MACHINE_BENCH_PRESS": "BENCH_PRESS", + "ISOMETRIC_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "KETTLEBELL_CHEST_PRESS": "BENCH_PRESS", + "NEUTRAL_GRIP_DUMBBELL_BENCH_PRESS": "BENCH_PRESS", + "NEUTRAL_GRIP_DUMBBELL_INCLINE_BENCH_PRESS": "BENCH_PRESS", + "ONE_ARM_FLOOR_PRESS": "BENCH_PRESS", + "WEIGHTED_ONE_ARM_FLOOR_PRESS": "BENCH_PRESS", + "PARTIAL_LOCKOUT": "BENCH_PRESS", + "REVERSE_GRIP_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "REVERSE_GRIP_INCLINE_BENCH_PRESS": "BENCH_PRESS", + "SINGLE_ARM_CABLE_CHEST_PRESS": "BENCH_PRESS", + "SINGLE_ARM_DUMBBELL_BENCH_PRESS": "BENCH_PRESS", + "SMITH_MACHINE_BENCH_PRESS": "BENCH_PRESS", + "SWISS_BALL_DUMBBELL_CHEST_PRESS": "BENCH_PRESS", + "TRIPLE_STOP_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "WIDE_GRIP_BARBELL_BENCH_PRESS": "BENCH_PRESS", + "BENCH_PRESS": "BENCH_PRESS", + "WEIGHTED_BENCH_PRESS": "BENCH_PRESS", + # ========================================================================= + # DEADLIFT Category + # ========================================================================= + "BARBELL_DEADLIFT": "DEADLIFT", + "BARBELL_STRAIGHT_LEG_DEADLIFT": "DEADLIFT", + "DUMBBELL_DEADLIFT": "DEADLIFT", + "DUMBBELL_SINGLE_LEG_DEADLIFT_TO_ROW": "DEADLIFT", + "DUMBBELL_STRAIGHT_LEG_DEADLIFT": "DEADLIFT", + "KETTLEBELL_FLOOR_TO_SHELF": "DEADLIFT", + "KETTLEBELL_DEADLIFT": "DEADLIFT", + "KETTLEBELL_SUMO_DEADLIFT": "DEADLIFT", + "ONE_ARM_ONE_LEG_DEADLIFT": "DEADLIFT", + "RACK_PULL": "DEADLIFT", + "ROTATIONAL_DUMBBELL_STRAIGHT_LEG_DEADLIFT": "DEADLIFT", + "SINGLE_ARM_DEADLIFT": "DEADLIFT", + "SINGLE_LEG_BARBELL_DEADLIFT": "DEADLIFT", + "SINGLE_LEG_BARBELL_STRAIGHT_LEG_DEADLIFT": "DEADLIFT", + "SINGLE_LEG_DEADLIFT_WITH_BARBELL": "DEADLIFT", + 
"SINGLE_LEG_RDL_CIRCUIT": "DEADLIFT", + "SINGLE_LEG_ROMANIAN_DEADLIFT_CIRCUIT": "DEADLIFT", + "SINGLE_LEG_ROMANIAN_DEADLIFT_WITH_DUMBBELL": "DEADLIFT", + "SUMO_DEADLIFT": "DEADLIFT", + "SUMO_DEADLIFT_HIGH_PULL": "DEADLIFT", + "TRAP_BAR_DEADLIFT": "DEADLIFT", + "WIDE_GRIP_BARBELL_DEADLIFT": "DEADLIFT", + "DEADLIFT": "DEADLIFT", + "WEIGHTED_DEADLIFT": "DEADLIFT", + "ROMANIAN_DEADLIFT": "DEADLIFT", + "STIFF_LEG_DEADLIFT": "DEADLIFT", + "STRAIGHT_LEG_DEADLIFT": "DEADLIFT", + # ========================================================================= + # SQUAT Category + # ========================================================================= + "AIR_SQUAT": "SQUAT", + "BACK_SQUAT_WITH_BODY_BAR": "SQUAT", + "BACK_SQUATS": "SQUAT", + "WEIGHTED_BACK_SQUATS": "SQUAT", + "BALANCING_SQUAT": "SQUAT", + "WEIGHTED_BALANCING_SQUAT": "SQUAT", + "BARBELL_BACK_SQUAT": "SQUAT", + "BARBELL_BOX_SQUAT": "SQUAT", + "BARBELL_FRONT_SQUAT": "SQUAT", + "BARBELL_HACK_SQUAT": "SQUAT", + "BARBELL_HANG_SQUAT_SNATCH": "SQUAT", + "BARBELL_LATERAL_STEP_UP": "SQUAT", + "BARBELL_QUARTER_SQUAT": "SQUAT", + "BARBELL_SIFF_SQUAT": "SQUAT", + "BARBELL_SQUAT_SNATCH": "SQUAT", + "BARBELL_SQUAT_WITH_HEELS_RAISED": "SQUAT", + "BARBELL_STEPOVER": "SQUAT", + "BARBELL_STEP_UP": "SQUAT", + "BENCH_SQUAT_WITH_ROTATIONAL_CHOP": "SQUAT", + "BODY_WEIGHT_WALL_SQUAT": "SQUAT", + "WEIGHTED_WALL_SQUAT": "SQUAT", + "BOX_STEP_SQUAT": "SQUAT", + "BRACED_SQUAT": "SQUAT", + "CROSSED_ARM_BARBELL_FRONT_SQUAT": "SQUAT", + "CROSSOVER_DUMBBELL_STEP_UP": "SQUAT", + "DUMBBELL_FRONT_SQUAT": "SQUAT", + "DUMBBELL_SPLIT_SQUAT": "SQUAT", + "DUMBBELL_SQUAT": "SQUAT", + "DUMBBELL_SQUAT_CLEAN": "SQUAT", + "DUMBBELL_SQUAT_SNATCH": "SQUAT", + "DUMBBELL_STEPOVER": "SQUAT", + "DUMBBELL_STEP_UP": "SQUAT", + "DUMBBELL_THRUSTERS": "SQUAT", + "ELEVATED_SINGLE_LEG_SQUAT": "SQUAT", + "FIGURE_FOUR_SQUATS": "SQUAT", + "GOBLET_SQUAT": "SQUAT", + "KETTLEBELL_SQUAT": "SQUAT", + "KETTLEBELL_SWING_OVERHEAD": "SQUAT", + 
"KETTLEBELL_SWING_WITH_FLIP_TO_SQUAT": "SQUAT", + "LATERAL_DUMBBELL_STEP_UP": "SQUAT", + "LEG_PRESS": "SQUAT", + "MEDICINE_BALL_SQUAT": "SQUAT", + "ONE_LEGGED_SQUAT": "SQUAT", + "OVERHEAD_BARBELL_SQUAT": "SQUAT", + "OVERHEAD_DUMBBELL_SQUAT": "SQUAT", + "OVERHEAD_SQUAT": "SQUAT", + "PARTIAL_SINGLE_LEG_SQUAT": "SQUAT", + "PISTOL_SQUAT": "SQUAT", + "PLIE_SLIDES": "SQUAT", + "PLIE_SQUAT": "SQUAT", + "PRISONER_SQUAT": "SQUAT", + "SINGLE_LEG_BENCH_GET_UP": "SQUAT", + "SINGLE_LEG_BENCH_SQUAT": "SQUAT", + "SINGLE_LEG_SQUAT_ON_SWISS_BALL": "SQUAT", + "SQUAT": "SQUAT", + "WEIGHTED_SQUAT": "SQUAT", + "SQUATS_WITH_BAND": "SQUAT", + "SQUAT_AND_SIDE_KICK": "SQUAT", + "SQUAT_JUMPS_IN_N_OUT": "SQUAT", + "STAGGERED_SQUAT": "SQUAT", + "STEP_UP": "SQUAT", + "SUITCASE_SQUATS": "SQUAT", + "SUMO_SQUAT": "SQUAT", + "SUMO_SQUAT_SLIDE_IN": "SQUAT", + "SUMO_SQUAT_TO_HIGH_PULL": "SQUAT", + "SUMO_SQUAT_TO_STAND": "SQUAT", + "SUMO_SQUAT_WITH_ROTATION": "SQUAT", + "SWISS_BALL_BODY_WEIGHT_WALL_SQUAT": "SQUAT", + "THRUSTERS": "SQUAT", + "UNEVEN_SQUAT": "SQUAT", + "WAIST_SLIMMING_SQUAT": "SQUAT", + "WALL_BALL": "SQUAT", + "WALL_BALL_SQUAT_AND_PRESS": "SQUAT", + "WIDE_STANCE_BARBELL_SQUAT": "SQUAT", + "WIDE_STANCE_GOBLET_SQUAT": "SQUAT", + "ZERCHER_SQUAT": "SQUAT", + "FRONT_SQUAT": "SQUAT", + "HACK_SQUAT": "SQUAT", + "JUMP_SQUAT": "SQUAT", + "SPLIT_SQUAT": "SQUAT", + "BOX_SQUAT": "SQUAT", + "PAUSE_SQUAT": "SQUAT", + "BULGARIAN_SPLIT_SQUAT": "SQUAT", + "ALTERNATING_BOX_DUMBBELL_STEP_UPS": "SQUAT", + "SQUAT_AMERICAN_SWING": "SQUAT", + # ========================================================================= + # CURL Category (Biceps) + # ========================================================================= + "ALTERNATING_DUMBBELL_BICEPS_CURL": "CURL", + "ALTERNATING_DUMBBELL_BICEPS_CURL_ON_SWISS_BALL": "CURL", + "ALTERNATING_INCLINE_DUMBBELL_BICEPS_CURL": "CURL", + "BARBELL_BICEPS_CURL": "CURL", + "BARBELL_REVERSE_WRIST_CURL": "CURL", + "BARBELL_WRIST_CURL": "CURL", + 
"BEHIND_THE_BACK_BARBELL_REVERSE_WRIST_CURL": "CURL", + "BEHIND_THE_BACK_ONE_ARM_CABLE_CURL": "CURL", + "CABLE_BICEPS_CURL": "CURL", + "CABLE_HAMMER_CURL": "CURL", + "CHEATING_BARBELL_BICEPS_CURL": "CURL", + "CLOSE_GRIP_EZ_BAR_BICEPS_CURL": "CURL", + "CROSS_BODY_DUMBBELL_HAMMER_CURL": "CURL", + "DEAD_HANG_BICEPS_CURL": "CURL", + "DECLINE_HAMMER_CURL": "CURL", + "DUMBBELL_BICEPS_CURL": "CURL", + "DUMBBELL_BICEPS_CURL_WITH_STATIC_HOLD": "CURL", + "DUMBBELL_HAMMER_CURL": "CURL", + "DUMBBELL_REVERSE_WRIST_CURL": "CURL", + "DUMBBELL_WRIST_CURL": "CURL", + "EZ_BAR_PREACHER_CURL": "CURL", + "FORWARD_BEND_BICEPS_CURL": "CURL", + "HAMMER_CURL_TO_PRESS": "CURL", + "INCLINE_DUMBBELL_BICEPS_CURL": "CURL", + "INCLINE_OFFSET_THUMB_DUMBBELL_CURL": "CURL", + "KETTLEBELL_BICEPS_CURL": "CURL", + "LYING_CONCENTRATION_CABLE_CURL": "CURL", + "ONE_ARM_CONCENTRATION_CURL": "CURL", + "ONE_ARM_PREACHER_CURL": "CURL", + "PLATE_PINCH_CURL": "CURL", + "PREACHER_CURL_WITH_CABLE": "CURL", + "REVERSE_EZ_BAR_CURL": "CURL", + "REVERSE_GRIP_BARBELL_BICEPS_CURL": "CURL", + "REVERSE_GRIP_WRIST_CURL": "CURL", + "SEATED_ALTERNATING_DUMBBELL_BICEPS_CURL": "CURL", + "SEATED_DUMBBELL_BICEPS_CURL": "CURL", + "SEATED_REVERSE_DUMBBELL_CURL": "CURL", + "SPLIT_STANCE_OFFSET_PINKY_DUMBBELL_CURL": "CURL", + "STANDING_ALTERNATING_DUMBBELL_CURLS": "CURL", + "STANDING_DUMBBELL_BICEPS_CURL": "CURL", + "STANDING_EZ_BAR_BICEPS_CURL": "CURL", + "STANDING_ZOTTMAN_BICEPS_CURL": "CURL", + "STATIC_CURL": "CURL", + "SWISS_BALL_DUMBBELL_OVERHEAD_TRICEPS_EXTENSION": "CURL", + "SWISS_BALL_EZ_BAR_PREACHER_CURL": "CURL", + "TWISTING_STANDING_DUMBBELL_BICEPS_CURL": "CURL", + "WIDE_GRIP_EZ_BAR_BICEPS_CURL": "CURL", + "BICEPS_CURL": "CURL", + "HAMMER_CURL": "CURL", + "PREACHER_CURL": "CURL", + "CONCENTRATION_CURL": "CURL", + "SPIDER_CURL": "CURL", + "ZOTTMAN_CURL": "CURL", + "INCLINE_CURL": "CURL", + "CABLE_CURL": "CURL", + "DRAG_CURL": "CURL", + "WRIST_CURL": "CURL", + "REVERSE_CURL": "CURL", + # 
========================================================================= + # ROW Category + # ========================================================================= + "ALTERNATING_DUMBBELL_ROW": "ROW", + "BANDED_FACE_PULLS": "ROW", + "BARBELL_ROW": "ROW", + "BARBELL_STRAIGHT_LEG_DEADLIFT_TO_ROW": "ROW", + "BENT_OVER_ROW_WITH_BARBELL": "ROW", + "BENT_OVER_ROW_WITH_DUMBBELL": "ROW", + "CABLE_ROW_STANDING": "ROW", + "CHEST_SUPPORTED_DUMBBELL_ROW": "ROW", + "DECLINE_RING_ROW": "ROW", + "DUMBBELL_ROW": "ROW", + "ELEVATED_FEET_INVERTED_ROW": "ROW", + "ELEVATED_RING_ROW": "ROW", + "FACE_PULL": "ROW", + "FACE_PULL_WITH_EXTERNAL_ROTATION": "ROW", + "INDOOR_ROW": "ROW", + "INVERTED_ROW": "ROW", + "INVERTED_ROW_WITH_FEET_ON_SWISS_BALL": "ROW", + "KETTLEBELL_ROW": "ROW", + "MODIFIED_INVERTED_ROW": "ROW", + "NEUTRAL_GRIP_ALTERNATING_DUMBBELL_ROW": "ROW", + "ONE_ARM_BENT_OVER_ROW": "ROW", + "ONE_LEGGED_DUMBBELL_ROW": "ROW", + "RDL_BENT_OVER_ROW_WITH_BARBELL_DUMBBELL": "ROW", + "RENEGADE_ROW": "ROW", + "REVERSE_GRIP_BARBELL_ROW": "ROW", + "RING_ROW": "ROW", + "ROPE_HANDLE_CABLE_ROW": "ROW", + "ROW": "ROW", + "WEIGHTED_ROW": "ROW", + "SEATED_CABLE_ROW": "ROW", + "SEATED_DUMBBELL_ROW": "ROW", + "SEATED_UNDERHAND_GRIP_CABLE_ROW": "ROW", + "SINGLE_ARM_CABLE_ROW": "ROW", + "SINGLE_ARM_CABLE_ROW_AND_ROTATION": "ROW", + "SINGLE_ARM_INVERTED_ROW": "ROW", + "SINGLE_ARM_NEUTRAL_GRIP_DUMBBELL_ROW": "ROW", + "SINGLE_ARM_NEUTRAL_GRIP_DUMBBELL_ROW_AND_ROTATION": "ROW", + "SUSPENDED_INVERTED_ROW": "ROW", + "T_BAR_ROW": "ROW", + "TOWEL_GRIP_INVERTED_ROW": "ROW", + "TRX_INVERTED_ROW": "ROW", + "UNDERHAND_GRIP_CABLE_ROW": "ROW", + "V_GRIP_CABLE_ROW": "ROW", + "WIDE_GRIP_SEATED_CABLE_ROW": "ROW", + "BENT_OVER_ROW": "ROW", + "PENDLAY_ROW": "ROW", + "YATES_ROW": "ROW", + "MEADOWS_ROW": "ROW", + "GORILLA_ROW": "ROW", + "SEAL_ROW": "ROW", + "KROC_ROW": "ROW", + # ========================================================================= + # LUNGE Category + # 
========================================================================= + "ALTERNATING_BARBELL_FORWARD_LUNGE": "LUNGE", + "ALTERNATING_DUMBBELL_LUNGE_WITH_REACH": "LUNGE", + "BACK_FOOT_ELEVATED_DUMBBELL_SPLIT_SQUAT": "LUNGE", + "BARBELL_BOX_LUNGE": "LUNGE", + "BARBELL_BULGARIAN_SPLIT_SQUAT": "LUNGE", + "BARBELL_CROSSOVER_LUNGE": "LUNGE", + "BARBELL_FRONT_SPLIT_SQUAT": "LUNGE", + "BARBELL_LUNGE": "LUNGE", + "BARBELL_REVERSE_LUNGE": "LUNGE", + "BARBELL_SIDE_LUNGE": "LUNGE", + "BARBELL_SPLIT_SQUAT": "LUNGE", + "CORE_CONTROL_REAR_LUNGE": "LUNGE", + "CURTSY_LUNGE": "LUNGE", + "DIAGONAL_LUNGE": "LUNGE", + "DROP_LUNGE": "LUNGE", + "DUMBBELL_BOX_LUNGE": "LUNGE", + "DUMBBELL_BULGARIAN_SPLIT_SQUAT": "LUNGE", + "DUMBBELL_CROSSOVER_LUNGE": "LUNGE", + "DUMBBELL_DIAGONAL_LUNGE": "LUNGE", + "DUMBBELL_LUNGE": "LUNGE", + "DUMBBELL_LUNGE_AND_ROTATION": "LUNGE", + "DUMBBELL_OVERHEAD_BULGARIAN_SPLIT_SQUAT": "LUNGE", + "DUMBBELL_OVERHEAD_WALKING_LUNGE": "LUNGE", + "DUMBBELL_REVERSE_LUNGE": "LUNGE", + "DUMBBELL_REVERSE_LUNGE_TO_HIGH_KNEE_AND_PRESS": "LUNGE", + "DUMBBELL_SIDE_LUNGE": "LUNGE", + "ELEVATED_FRONT_FOOT_BARBELL_SPLIT_SQUAT": "LUNGE", + "ELEVATED_FRONT_FOOT_DUMBBELL_SPLIT_SQUAT": "LUNGE", + "FORWARD_LUNGE": "LUNGE", + "FORWARD_SLIDE_LUNGE": "LUNGE", + "FRONT_FOOT_ELEVATED_LUNGE": "LUNGE", + "FRONT_LUNGE": "LUNGE", + "GUN_SLINGER_LUNGE": "LUNGE", + "LUNGE": "LUNGE", + "WEIGHTED_LUNGE": "LUNGE", + "LUNGE_MATRIX": "LUNGE", + "LUNGE_WITH_ARM_REACH": "LUNGE", + "LUNGE_WITH_DIAGONAL_REACH": "LUNGE", + "LUNGE_WITH_SIDE_BEND": "LUNGE", + "OFFSET_DUMBBELL_LUNGE": "LUNGE", + "OFFSET_DUMBBELL_REVERSE_LUNGE": "LUNGE", + "OVERHEAD_BARBELL_REVERSE_LUNGE": "LUNGE", + "OVERHEAD_BARBELL_SPLIT_SQUAT": "LUNGE", + "OVERHEAD_DUMBBELL_REVERSE_LUNGE": "LUNGE", + "OVERHEAD_DUMBBELL_SPLIT_SQUAT": "LUNGE", + "OVERHEAD_LUNGE": "LUNGE", + "OVERHEAD_LUNGE_WITH_ROTATION": "LUNGE", + "REAR_LUNGE": "LUNGE", + "REVERSE_BARBELL_BOX_LUNGE": "LUNGE", + "REVERSE_BOX_LUNGE": "LUNGE", + 
"REVERSE_DUMBBELL_BOX_LUNGE": "LUNGE", + "REVERSE_DUMBBELL_CROSSOVER_LUNGE": "LUNGE", + "REVERSE_DUMBBELL_DIAGONAL_LUNGE": "LUNGE", + "REVERSE_LUNGE": "LUNGE", + "REVERSE_LUNGE_WITH_REACH_BACK": "LUNGE", + "REVERSE_LUNGE_WITH_TWIST_AND_OVERHEAD_REACH": "LUNGE", + "REVERSE_SLIDING_BOX_LUNGE": "LUNGE", + "REVERSE_SLIDING_LUNGE": "LUNGE", + "RUNNERS_LUNGE_TO_BALANCE": "LUNGE", + "SCISSOR_POWER_SWITCH": "LUNGE", + "SHIFTING_SIDE_LUNGE": "LUNGE", + "SIDE_AND_CROSSOVER_LUNGE": "LUNGE", + "SIDE_LUNGE": "LUNGE", + "WEIGHTED_SIDE_LUNGE": "LUNGE", + "SIDE_LUNGE_AND_PRESS": "LUNGE", + "SIDE_LUNGE_JUMP_OFF": "LUNGE", + "SIDE_LUNGE_SWEEP": "LUNGE", + "SIDE_LUNGE_TDL": "LUNGE", + "SIDE_LUNGE_TO_CROSSOVER_TAP": "LUNGE", + "SIDE_TO_SIDE_LUNGE_CHOPS": "LUNGE", + "SIFF_JUMP_LUNGE": "LUNGE", + "SINGLE_ARM_REVERSE_LUNGE_AND_PRESS": "LUNGE", + "SLIDING_LATERAL_LUNGE": "LUNGE", + "SPLIT_BULGARIAN_SQUAT": "LUNGE", + "WALKING_BARBELL_LUNGE": "LUNGE", + "WALKING_DUMBBELL_LUNGE": "LUNGE", + "WALKING_LUNGE": "LUNGE", + "WEIGHTED_WALKING_LUNGE": "LUNGE", + "ALTERNATING_DUMBBELL_LUNGE": "LUNGE", + "LATERAL_LUNGE": "LUNGE", + "STEP_THROUGH_LUNGE": "LUNGE", + "JUMPING_LUNGE": "LUNGE", + # ========================================================================= + # SHOULDER_PRESS Category + # ========================================================================= + "ALTERNATING_DUMBBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "ARNOLD_PRESS": "SHOULDER_PRESS", + "BARBELL_FRONT_SQUAT_TO_PUSH_PRESS": "SHOULDER_PRESS", + "BARBELL_PUSH_PRESS": "SHOULDER_PRESS", + "BARBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "DEAD_CURL_PRESS": "SHOULDER_PRESS", + "DUMBBELL_ALTERNATING_SHOULDER_PRESS_AND_TWIST": "SHOULDER_PRESS", + "DUMBBELL_HAMMER_CURL_TO_LUNGE_TO_PRESS": "SHOULDER_PRESS", + "DUMBBELL_PUSH_PRESS": "SHOULDER_PRESS", + "DUMBBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "FLOOR_INVERTED_SHOULDER_PRESS": "SHOULDER_PRESS", + "INVERTED_SHOULDER_PRESS": "SHOULDER_PRESS", + "KETTLEBELL_PUSH_PRESS": 
"SHOULDER_PRESS", + "KETTLEBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "LANDMINE_PRESS": "SHOULDER_PRESS", + "ONE_ARM_PUSH_PRESS": "SHOULDER_PRESS", + "OVERHEAD_BARBELL_PRESS": "SHOULDER_PRESS", + "OVERHEAD_DUMBBELL_PRESS": "SHOULDER_PRESS", + "SEATED_BARBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "SEATED_DUMBBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "SINGLE_ARM_DUMBBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "SINGLE_ARM_STEP_UP_AND_PRESS": "SHOULDER_PRESS", + "SMITH_MACHINE_OVERHEAD_PRESS": "SHOULDER_PRESS", + "SPLIT_STANCE_HAMMER_CURL_TO_PRESS": "SHOULDER_PRESS", + "SWISS_BALL_DUMBBELL_SHOULDER_PRESS": "SHOULDER_PRESS", + "WEIGHT_PLATE_FRONT_RAISE": "SHOULDER_PRESS", + "SHOULDER_PRESS": "SHOULDER_PRESS", + "WEIGHTED_SHOULDER_PRESS": "SHOULDER_PRESS", + "MILITARY_PRESS": "SHOULDER_PRESS", + "OVERHEAD_PRESS": "SHOULDER_PRESS", + "PUSH_PRESS": "SHOULDER_PRESS", + "STRICT_PRESS": "SHOULDER_PRESS", + "Z_PRESS": "SHOULDER_PRESS", + "VIKING_PRESS": "SHOULDER_PRESS", + "LOG_PRESS": "SHOULDER_PRESS", + "BEHIND_NECK_PRESS": "SHOULDER_PRESS", + # ========================================================================= + # LATERAL_RAISE Category + # ========================================================================= + "45_DEGREE_CABLE_EXTERNAL_ROTATION": "LATERAL_RAISE", + "ALTERNATING_LATERAL_RAISE_WITH_STATIC_HOLD": "LATERAL_RAISE", + "BAR_MUSCLE_UP": "LATERAL_RAISE", + "BENT_OVER_LATERAL_RAISE": "LATERAL_RAISE", + "CABLE_DIAGONAL_RAISE": "LATERAL_RAISE", + "CABLE_FRONT_RAISE": "LATERAL_RAISE", + "CALORIE_ROW": "LATERAL_RAISE", + "COMBO_SHOULDER_RAISE": "LATERAL_RAISE", + "DUMBBELL_DIAGONAL_RAISE": "LATERAL_RAISE", + "DUMBBELL_V_RAISE": "LATERAL_RAISE", + "FRONT_RAISE": "LATERAL_RAISE", + "LEANING_DUMBBELL_LATERAL_RAISE": "LATERAL_RAISE", + "LYING_DUMBBELL_RAISE": "LATERAL_RAISE", + "MUSCLE_UP": "LATERAL_RAISE", + "ONE_ARM_CABLE_LATERAL_RAISE": "LATERAL_RAISE", + "OVERHAND_GRIP_REAR_LATERAL_RAISE": "LATERAL_RAISE", + "PLATE_RAISES": "LATERAL_RAISE", + "RING_DIP": 
"LATERAL_RAISE", + "RING_MUSCLE_UP": "LATERAL_RAISE", + "ROPE_CLIMB": "LATERAL_RAISE", + "SCAPTION": "LATERAL_RAISE", + "SCAPTION_AND_SHRUG": "LATERAL_RAISE", + "SEATED_LATERAL_RAISE": "LATERAL_RAISE", + "SEATED_REAR_LATERAL_RAISE": "LATERAL_RAISE", + "SIDE_LYING_LATERAL_RAISE": "LATERAL_RAISE", + "STANDING_LIFT": "LATERAL_RAISE", + "SUSPENDED_ROW": "LATERAL_RAISE", + "UNDERHAND_GRIP_REAR_LATERAL_RAISE": "LATERAL_RAISE", + "WALL_SLIDE": "LATERAL_RAISE", + "WEIGHTED_WALL_SLIDE": "LATERAL_RAISE", + "LATERAL_RAISE": "LATERAL_RAISE", + "WEIGHTED_LATERAL_RAISE": "LATERAL_RAISE", + "ARM_CIRCLES": "LATERAL_RAISE", + "DUMBBELL_FRONT_RAISE": "LATERAL_RAISE", + "CABLE_LATERAL_RAISE": "LATERAL_RAISE", + "REAR_DELT_FLY": "LATERAL_RAISE", + "DUMBBELL_LATERAL_RAISE": "LATERAL_RAISE", + "UPRIGHT_ROW": "LATERAL_RAISE", + # ========================================================================= + # TRICEPS_EXTENSION Category + # ========================================================================= + "BENCH_DIP": "TRICEPS_EXTENSION", + "WEIGHTED_BENCH_DIP": "TRICEPS_EXTENSION", + "BODY_WEIGHT_DIP": "TRICEPS_EXTENSION", + "CABLE_KICKBACK": "TRICEPS_EXTENSION", + "CABLE_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "CABLE_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "DUMBBELL_KICKBACK": "TRICEPS_EXTENSION", + "DUMBBELL_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "EZ_BAR_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "INCLINE_DIP": "TRICEPS_EXTENSION", + "WEIGHTED_INCLINE_DIP": "TRICEPS_EXTENSION", + "INCLINE_EZ_BAR_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "LYING_DUMBBELL_PULLOVER_TO_EXTENSION": "TRICEPS_EXTENSION", + "LYING_EZ_BAR_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "LYING_TRICEPS_EXTENSION_TO_CLOSE_GRIP_BENCH_PRESS": "TRICEPS_EXTENSION", + "NEUTRAL_GRIP_TRICEPS_PUSHDOWN": "TRICEPS_EXTENSION", + "OVERHEAD_DUMBBELL_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "PRONATED_GRIP_TRICEPS_PUSHDOWN": "TRICEPS_EXTENSION", + "REVERSE_GRIP_TRICEPS_PUSHDOWN": 
"TRICEPS_EXTENSION", + "ROPE_PUSHDOWN": "TRICEPS_EXTENSION", + "SEATED_BARBELL_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SEATED_DUMBBELL_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SEATED_EZ_BAR_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SEATED_SINGLE_ARM_OVERHEAD_DUMBBELL_EXTENSION": "TRICEPS_EXTENSION", + "SINGLE_ARM_DUMBBELL_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SINGLE_DUMBBELL_SEATED_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "STATIC_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SUSPENDED_DIP": "TRICEPS_EXTENSION", + "WEIGHTED_SUSPENDED_DIP": "TRICEPS_EXTENSION", + "SWISS_BALL_DUMBBELL_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SWISS_BALL_EZ_BAR_LYING_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "SWISS_BALL_EZ_BAR_OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "TABLETOP_DIP": "TRICEPS_EXTENSION", + "TRICEPS_DIP": "TRICEPS_EXTENSION", + "WEIGHTED_TRICEPS_DIP": "TRICEPS_EXTENSION", + "TRICEPS_EXTENSION_ON_FLOOR": "TRICEPS_EXTENSION", + "TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "WEIGHTED_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "TRICEPS_PUSHDOWN": "TRICEPS_EXTENSION", + "SKULL_CRUSHER": "TRICEPS_EXTENSION", + "CLOSE_GRIP_BENCH_PRESS": "TRICEPS_EXTENSION", + "DIAMOND_PUSH_UP": "TRICEPS_EXTENSION", + "TRICEPS_KICKBACK": "TRICEPS_EXTENSION", + "OVERHEAD_TRICEPS_EXTENSION": "TRICEPS_EXTENSION", + "DIP": "TRICEPS_EXTENSION", + "WEIGHTED_DIP": "TRICEPS_EXTENSION", + "JM_PRESS": "TRICEPS_EXTENSION", + "TATE_PRESS": "TRICEPS_EXTENSION", + # ========================================================================= + # PULL_UP Category + # ========================================================================= + "BANDED_PULL_UP": "PULL_UP", + "BURPEE_PULL_UP": "PULL_UP", + "CHEST_TO_BAR_PULL_UP": "PULL_UP", + "CHIN_UP": "PULL_UP", + "CLOSE_GRIP_CHIN_UP": "PULL_UP", + "CLOSE_GRIP_LAT_PULLDOWN": "PULL_UP", + "CROSSOVER_CHIN_UP": "PULL_UP", + "EZ_BAR_PULLOVER": "PULL_UP", + "HANGING_HURDLE": "PULL_UP", + 
"JUMPING_PULL_UPS": "PULL_UP", + "KIPPING_PULL_UP": "PULL_UP", + "KNEELING_LAT_PULLDOWN": "PULL_UP", + "KNEELING_UNDERHAND_GRIP_LAT_PULLDOWN": "PULL_UP", + "LAT_PULLDOWN": "PULL_UP", + "L_PULL_UP": "PULL_UP", + "MIXED_GRIP_CHIN_UP": "PULL_UP", + "MIXED_GRIP_PULL_UP": "PULL_UP", + "NEUTRAL_GRIP_CHIN_UP": "PULL_UP", + "NEUTRAL_GRIP_PULL_UP": "PULL_UP", + "ONE_ARM_CHIN_UP": "PULL_UP", + "PARALLEL_CLOSE_GRIP_PULL_UP": "PULL_UP", + "PULL_UP": "PULL_UP", + "WEIGHTED_PULL_UP": "PULL_UP", + "PULL_UPS": "PULL_UP", + "REVERSE_CLOSE_GRIP_LAT_PULLDOWN": "PULL_UP", + "STANDING_CABLE_PULLOVER": "PULL_UP", + "STRAIGHT_ARM_PULLDOWN": "PULL_UP", + "SUSPENDED_CHIN_UP": "PULL_UP", + "SWISS_BALL_EZ_BAR_PULLOVER": "PULL_UP", + "TOWEL_PULL_UP": "PULL_UP", + "UNDERHAND_GRIP_LAT_PULLDOWN": "PULL_UP", + "WEIGHTED_CHIN_UP": "PULL_UP", + "WIDE_GRIP_LAT_PULLDOWN": "PULL_UP", + "WIDE_GRIP_PULL_UP": "PULL_UP", + "WIDE_GRIP_PULL_UPS": "PULL_UP", + "COMMANDO_PULL_UP": "PULL_UP", + "ARCHER_PULL_UP": "PULL_UP", + "TYPEWRITER_PULL_UP": "PULL_UP", + "MUSCLE_UP_PULL_UP": "PULL_UP", + # ========================================================================= + # PUSH_UP Category + # ========================================================================= + "ALTERNATING_STAGGERED_PUSH_UP": "PUSH_UP", + "CHEST_PRESS_WITH_BAND": "PUSH_UP", + "CLAPPING_PUSH_UP": "PUSH_UP", + "CLOSE_GRIP_MEDICINE_BALL_PUSH_UP": "PUSH_UP", + "CLOSE_HANDS_PUSH_UP": "PUSH_UP", + "DECLINE_PUSH_UP": "PUSH_UP", + "DIAMOND_PUSH_UP_ON_KNEES": "PUSH_UP", + "DIVEBOMBER_PUSH_UP": "PUSH_UP", + "EXPLOSIVE_CROSSOVER_PUSH_UP": "PUSH_UP", + "EXPLOSIVE_PUSH_UP": "PUSH_UP", + "FEET_ELEVATED_SIDE_TO_SIDE_PUSH_UP": "PUSH_UP", + "FLOOR_PRESS": "PUSH_UP", + "HAND_RELEASE_PUSH_UP": "PUSH_UP", + "INCLINE_PUSH_UP": "PUSH_UP", + "ISOMETRIC_EXPLOSIVE_PUSH_UP": "PUSH_UP", + "JUDO_PUSH_UP": "PUSH_UP", + "KNEELING_PUSH_UP": "PUSH_UP", + "MEDICINE_BALL_CHEST_PASS": "PUSH_UP", + "MEDICINE_BALL_PUSH_UP": "PUSH_UP", + "MODIFIED_PUSH_UP": "PUSH_UP", + 
"ONE_ARM_PUSH_UP": "PUSH_UP", + "PARALLEL_BAR_DIP": "PUSH_UP", + "PIKE_PUSH_UP": "PUSH_UP", + "PLYOMETRIC_SIDE_TO_SIDE_PUSH_UP": "PUSH_UP", + "PUSH_UP": "PUSH_UP", + "WEIGHTED_PUSH_UP": "PUSH_UP", + "PUSH_UP_AND_ROW": "PUSH_UP", + "PUSH_UP_ON_SWISS_BALL": "PUSH_UP", + "PUSH_UP_PLUS": "PUSH_UP", + "PUSH_UP_WITH_FEET_ON_SWISS_BALL": "PUSH_UP", + "PUSH_UP_WITH_HANDS_ON_SWISS_BALL": "PUSH_UP", + "RING_PUSH_UP": "PUSH_UP", + "SHOULDER_TAP_PUSH_UP": "PUSH_UP", + "SINGLE_ARM_MEDICINE_BALL_PUSH_UP": "PUSH_UP", + "SPIDERMAN_PUSH_UP": "PUSH_UP", + "STACKED_FEET_PUSH_UP": "PUSH_UP", + "STAGGERED_HANDS_PUSH_UP": "PUSH_UP", + "STANDING_CABLE_CHEST_PRESS": "PUSH_UP", + "STANDING_INCLINE_FLY": "PUSH_UP", + "SUSPENDED_PUSH_UP": "PUSH_UP", + "SWISS_BALL_PUSH_UP": "PUSH_UP", + "SWISS_BALL_PUSH_UP_PLUS": "PUSH_UP", + "T_PUSH_UP": "PUSH_UP", + "TRIPLE_STOP_PUSH_UP": "PUSH_UP", + "WIDE_HANDS_PUSH_UP": "PUSH_UP", + "WALL_PUSH_UP": "PUSH_UP", + "HINDU_PUSH_UP": "PUSH_UP", + "ARCHER_PUSH_UP": "PUSH_UP", + "PSEUDO_PLANCHE_PUSH_UP": "PUSH_UP", + # ========================================================================= + # PLANK Category + # ========================================================================= + "45_DEGREE_PLANK": "PLANK", + "BEAR_CRAWL": "PLANK", + "CROSS_BODY_MOUNTAIN_CLIMBER": "PLANK", + "ELBOW_PLANK_PIKE_JACKS": "PLANK", + "ELEVATED_FEET_PLANK": "PLANK", + "ELEVATED_HANDS_PLANK": "PLANK", + "ELEVATOR_ABS": "PLANK", + "EXTENDED_PLANK": "PLANK", + "FULL_PLANK_PASSE_TWIST": "PLANK", + "INCHING_ELBOW_PLANK": "PLANK", + "INCHWORM": "PLANK", + "INCHWORM_TO_SIDE_PLANK": "PLANK", + "KNEELING_PLANK": "PLANK", + "KNEELING_SIDE_PLANK_WITH_LEG_LIFT": "PLANK", + "LATERAL_ROLL": "PLANK", + "LYING_REVERSE_PLANK": "PLANK", + "MEDICINE_BALL_MOUNTAIN_CLIMBER": "PLANK", + "MODIFIED_MOUNTAIN_CLIMBER_AND_EXTENSION": "PLANK", + "MOUNTAIN_CLIMBER": "PLANK", + "WEIGHTED_MOUNTAIN_CLIMBER": "PLANK", + "MOUNTAIN_CLIMBER_ON_SLIDING_DISCS": "PLANK", + "MOUNTAIN_CLIMBER_WITH_FEET_ON_BOSU_BALL": 
"PLANK", + "MOUNTAIN_CLIMBER_WITH_HANDS_ON_BENCH": "PLANK", + "MOUNTAIN_CLIMBER_WITH_HANDS_ON_SWISS_BALL": "PLANK", + "ONE_ARM_ONE_LEG_PLANK": "PLANK", + "ONE_LEG_PLANK": "PLANK", + "PLANK": "PLANK", + "WEIGHTED_PLANK": "PLANK", + "PLANK_JACKS": "PLANK", + "PLANK_KNEE_TWIST": "PLANK", + "PLANK_ON_ELBOWS": "PLANK", + "PLANK_PIKE_JUMPS": "PLANK", + "PLANK_PIKES": "PLANK", + "PLANK_TO_STAND_UP": "PLANK", + "PLANK_WITH_ARM_RAISE": "PLANK", + "PLANK_WITH_ARM_VARIATIONS": "PLANK", + "PLANK_WITH_FEET_ON_SWISS_BALL": "PLANK", + "PLANK_WITH_GLUTE_KICK_BACK": "PLANK", + "PLANK_WITH_HIP_EXTENSION": "PLANK", + "PLANK_WITH_KNEE_TO_CHEST": "PLANK", + "PLANK_WITH_KNEE_TO_ELBOW": "PLANK", + "PLANK_WITH_LEG_LIFT": "PLANK", + "PLANK_WITH_OBLIQUE_CRUNCH": "PLANK", + "PLYOMETRIC_SIDE_PLANK": "PLANK", + "REVERSE_PLANK": "PLANK", + "ROLLING_SIDE_PLANK": "PLANK", + "SIDE_KICK_PLANK": "PLANK", + "SIDE_PLANK": "PLANK", + "WEIGHTED_SIDE_PLANK": "PLANK", + "SIDE_PLANK_AND_ROW": "PLANK", + "SIDE_PLANK_LEG_LIFT": "PLANK", + "SIDE_PLANK_WITH_ELBOW_ON_BOSU_BALL": "PLANK", + "SIDE_PLANK_WITH_FEET_ON_BENCH": "PLANK", + "SIDE_PLANK_WITH_HIP_ABDUCTION": "PLANK", + "SIDE_PLANK_WITH_KNEE_CIRCLE": "PLANK", + "SIDE_PLANK_WITH_KNEE_TUCK": "PLANK", + "SIDE_PLANK_WITH_LEG_LIFT": "PLANK", + "SIDE_PLANK_WITH_REACH_UNDER": "PLANK", + "SINGLE_ARM_PLANK": "PLANK", + "SINGLE_LEG_ELEVATED_FEET_PLANK": "PLANK", + "SINGLE_LEG_FLEX_AND_EXTEND": "PLANK", + "SINGLE_LEG_SIDE_PLANK": "PLANK", + "SLIDING_MOUNTAIN_CLIMBER": "PLANK", + "STRAIGHT_ARM_PLANK": "PLANK", + "STRAIGHT_ARM_PLANK_WITH_SHOULDER_TOUCH": "PLANK", + "SUPERMAN_PLANK": "PLANK", + "SWISS_BALL_PLANK": "PLANK", + "SWISS_BALL_PLANK_LEG_LIFT": "PLANK", + "SWISS_BALL_PLANK_LEG_LIFT_AND_HOLD": "PLANK", + "SWISS_BALL_PLANK_WITH_FEET_ON_BENCH": "PLANK", + "SWISS_BALL_PRONE_JACKKNIFE": "PLANK", + "SWISS_BALL_SIDE_PLANK": "PLANK", + "THREE_WAY_PLANK": "PLANK", + "TOWEL_PLANK_AND_KNEE_IN": "PLANK", + "T_STABILIZATION": "PLANK", + "TURKISH_GET_UP_TO_SIDE_PLANK": 
"PLANK", + "TWO_POINT_PLANK": "PLANK", + "WEIGHTED_EXTENDED_PLANK": "PLANK", + "WEIGHTED_PLANK_ON_ELBOWS": "PLANK", + "WIDE_STANCE_PLANK_WITH_DIAGONAL_ARM_LIFT": "PLANK", + "WIDE_STANCE_PLANK_WITH_DIAGONAL_LEG_LIFT": "PLANK", + "WIDE_STANCE_PLANK_WITH_LEG_LIFT": "PLANK", + "WIDE_STANCE_PLANK_WITH_OPPOSITE_ARM_AND_LEG_LIFT": "PLANK", + "FOREARM_PLANK": "PLANK", + "HIGH_PLANK": "PLANK", + "LOW_PLANK": "PLANK", + "UP_DOWN_PLANK": "PLANK", + "BODY_SAW": "PLANK", + # ========================================================================= + # CORE Category + # ========================================================================= + "BICYCLE_MANEUVER": "CORE", + "CABLE_CORE_PRESS": "CORE", + "DEAD_BUG": "CORE", + "FIRE_HYDRANT": "CORE", + "HALF_KNEEL_CHOP": "CORE", + "HALF_KNEEL_LIFT": "CORE", + "HALF_KNEEL_ROTATION": "CORE", + "HANGING_KNEE_RAISE": "CORE", + "MEDICINE_BALL_SLAM": "CORE", + "OVERHEAD_CIRCLES": "CORE", + "PRONE_BACK_EXTENSION": "CORE", + "RUSSIAN_TWIST": "CORE", + "SLAM_BALL": "CORE", + "SPIDERMAN_CRUNCH": "CORE", + "STANDING_ROTATIONAL_CHOP": "CORE", + "STANDING_ROTATIONAL_LIFT": "CORE", + "SUPERMAN": "CORE", + "SUPERMAN_HOLD": "CORE", + "SWISS_BALL_BACK_EXTENSION": "CORE", + "SWISS_BALL_CRUNCH": "CORE", + "SWISS_BALL_DECLINE_OBLIQUE_CRUNCH": "CORE", + "SWISS_BALL_JACKKNIFE": "CORE", + "SWISS_BALL_PIKE": "CORE", + "SWISS_BALL_PLANK_LEG_LIFT_AND_HOLD_CORE": "CORE", + "SWISS_BALL_ROLLOUT": "CORE", + "SWISS_BALL_STIR_THE_POT": "CORE", + "SWISS_BALL_V_UP": "CORE", + "TUCK_HOLD": "CORE", + "V_UP": "CORE", + "WEIGHTED_RUSSIAN_TWIST": "CORE", + "WEIGHTED_V_UP": "CORE", + "WINDMILL": "CORE", + "WINDSHIELD_WIPER": "CORE", + "AB_WHEEL_ROLLOUT": "CORE", + "HOLLOW_BODY_HOLD": "CORE", + "HOLLOW_ROCK": "CORE", + "BIRD_DOG": "CORE", + "PALLOF_PRESS": "CORE", + "LANDMINE_ROTATION": "CORE", + "CABLE_WOODCHOP": "CORE", + "FARMERS_WALK": "CORE", + "SUITCASE_CARRY": "CORE", + "TURKISH_GET_UP": "CORE", + "BICYCLE_CRUNCH": "CORE", + # 
========================================================================= + # CRUNCH Category + # ========================================================================= + "BICYCLE_CRUNCH_EXERCISE": "CRUNCH", + "CABLE_CRUNCH": "CRUNCH", + "CIRCULAR_ARM_CRUNCH": "CRUNCH", + "CROSSED_ARMS_CRUNCH": "CRUNCH", + "CROSSOVER_CRUNCH": "CRUNCH", + "CRUNCH": "CRUNCH", + "WEIGHTED_CRUNCH": "CRUNCH", + "CRUNCH_CHOP": "CRUNCH", + "DOUBLE_CRUNCH": "CRUNCH", + "DUMBBELL_LEG_EXTENSION_HIP_LIFT": "CRUNCH", + "ELBOW_TO_KNEE_CRUNCH": "CRUNCH", + "FLUTTER_KICK": "CRUNCH", + "FOAM_ROLLER_REVERSE_CRUNCH_ON_BENCH": "CRUNCH", + "FOAM_ROLLER_REVERSE_CRUNCH_WITH_DUMBBELL": "CRUNCH", + "FOAM_ROLLER_REVERSE_CRUNCH_WITH_MEDICINE_BALL": "CRUNCH", + "FROG_PRESS": "CRUNCH", + "HANGING_KNEE_RAISE_OBLIQUE_CRUNCH": "CRUNCH", + "HIP_CROSSOVER": "CRUNCH", + "HOLLOW_ROCK_CRUNCH": "CRUNCH", + "INCLINE_REVERSE_CRUNCH": "CRUNCH", + "KNEELING_CABLE_CRUNCH": "CRUNCH", + "KNEELING_CROSS_CRUNCH": "CRUNCH", + "KNEELING_OBLIQUE_CABLE_CRUNCH": "CRUNCH", + "KNEES_TO_ELBOW": "CRUNCH", + "LEG_EXTENSIONS": "CRUNCH", + "LEG_LEVERS": "CRUNCH", + "MCGILL_CURL_UP": "CRUNCH", + "MODIFIED_PILATES_ROLL_UP_WITH_BALL": "CRUNCH", + "PILATES_CRUNCH": "CRUNCH", + "PILATES_ROLL_UP_WITH_BALL": "CRUNCH", + "RAISED_LEGS_CRUNCH": "CRUNCH", + "REVERSE_CRUNCH": "CRUNCH", + "WEIGHTED_REVERSE_CRUNCH": "CRUNCH", + "REVERSE_CRUNCH_ON_A_BENCH": "CRUNCH", + "REVERSE_CURL_AND_LIFT": "CRUNCH", + "ROTATIONAL_LIFT": "CRUNCH", + "SEATED_ALTERNATING_REVERSE_CRUNCH": "CRUNCH", + "SEATED_LEG_U": "CRUNCH", + "SIDE_TO_SIDE_CRUNCH_AND_WEAVE": "CRUNCH", + "SINGLE_LEG_REVERSE_CRUNCH": "CRUNCH", + "SKATER_CRUNCH_CROSS": "CRUNCH", + "STANDING_CABLE_CRUNCH": "CRUNCH", + "STANDING_SIDE_CRUNCH": "CRUNCH", + "STEP_CLIMB": "CRUNCH", + "STRAIGHT_LEG_CRUNCH": "CRUNCH", + "SWISS_BALL_CRUNCH_EXERCISE": "CRUNCH", + "SWISS_BALL_REVERSE_CRUNCH": "CRUNCH", + "SWISS_BALL_SIDE_CRUNCH": "CRUNCH", + "THORACIC_CRUNCHES_ON_FOAM_ROLLER": "CRUNCH", + "TOES_TO_BAR": 
"CRUNCH", + "TRICEPS_CRUNCH": "CRUNCH", + "WEIGHTED_CROSSOVER_CRUNCH": "CRUNCH", + "WEIGHTED_DOUBLE_CRUNCH": "CRUNCH", + "OBLIQUE_CRUNCH": "CRUNCH", + "VERTICAL_LEG_CRUNCH": "CRUNCH", + "LONG_ARM_CRUNCH": "CRUNCH", + "DEAD_BUG_CRUNCH": "CRUNCH", + # ========================================================================= + # SIT_UP Category + # ========================================================================= + "ALTERNATING_SIT_UP": "SIT_UP", + "BENT_KNEE_V_UP": "SIT_UP", + "BUTTERFLY_SIT_UP": "SIT_UP", + "CROSS_PUNCH_ROLL_UP": "SIT_UP", + "CROSSED_ARMS_SIT_UP": "SIT_UP", + "GET_UP_SIT_UP": "SIT_UP", + "HOVERING_SIT_UP": "SIT_UP", + "KETTLEBELL_SIT_UP": "SIT_UP", + "MEDICINE_BALL_ALTERNATING_V_UP": "SIT_UP", + "MEDICINE_BALL_SIT_UP": "SIT_UP", + "MEDICINE_BALL_V_UP": "SIT_UP", + "MODIFIED_SIT_UP": "SIT_UP", + "NEGATIVE_SIT_UP": "SIT_UP", + "ONE_ARM_FULL_SIT_UP": "SIT_UP", + "RECLINING_CIRCLE": "SIT_UP", + "REVERSE_CURL_UP": "SIT_UP", + "ROLLING_LIKE_BALL_PULLOVER": "SIT_UP", + "SINGLE_LEG_SWISS_BALL_JACKKNIFE": "SIT_UP", + "SIT_UP": "SIT_UP", + "WEIGHTED_SIT_UP": "SIT_UP", + "SLOW_BICYCLE": "SIT_UP", + "THE_CLIMB": "SIT_UP", + "V_UP_SIT_UP": "SIT_UP", + "WEIGHTED_BENT_KNEE_V_UP": "SIT_UP", + "WEIGHTED_BUTTERFLY_SIT_UP": "SIT_UP", + "WEIGHTED_MEDICINE_BALL_SIT_UP": "SIT_UP", + "WEIGHTED_MEDICINE_BALL_V_UP": "SIT_UP", + "WEIGHTED_RECLINING_CIRCLE": "SIT_UP", + "WEIGHTED_V_UP_SIT_UP": "SIT_UP", + "X_ABS": "SIT_UP", + "GHD_SIT_UP": "SIT_UP", + "ANCHORED_SIT_UP": "SIT_UP", + "DECLINE_SIT_UP": "SIT_UP", + # ========================================================================= + # LEG_RAISE Category + # ========================================================================= + "CAPTAIN_CHAIR_LEG_RAISE": "LEG_RAISE", + "FLAT_BENCH_HIP_LIFT": "LEG_RAISE", + "HANGING_LEG_RAISE": "LEG_RAISE", + "WEIGHTED_HANGING_LEG_RAISE": "LEG_RAISE", + "HIP_LIFT": "LEG_RAISE", + "HIP_LIFT_WITH_LEG_RAISE": "LEG_RAISE", + "KNEE_HIP_RAISE": "LEG_RAISE", + "LEG_RAISE": 
"LEG_RAISE", + "WEIGHTED_LEG_RAISE": "LEG_RAISE", + "LEG_RAISE_ON_FOAM_ROLLER": "LEG_RAISE", + "LYING_CABLE_HIP_RAISE": "LEG_RAISE", + "LYING_KNEE_RAISE": "LEG_RAISE", + "LYING_LEG_RAISE": "LEG_RAISE", + "LYING_LEG_RAISE_TO_HIP_LIFT": "LEG_RAISE", + "LYING_LEG_RAISE_TO_HIP_RAISE": "LEG_RAISE", + "PARALLEL_BAR_KNEE_RAISE": "LEG_RAISE", + "PARALLEL_BAR_LEG_RAISE": "LEG_RAISE", + "REVERSE_HIP_LIFT": "LEG_RAISE", + "REVERSE_HIP_RAISE": "LEG_RAISE", + "SEATED_HIP_RAISE": "LEG_RAISE", + "SINGLE_LEG_HIP_RAISE": "LEG_RAISE", + "STEP_THROUGH": "LEG_RAISE", + "STRAIGHT_LEG_HIP_RAISE": "LEG_RAISE", + "SUSPENDED_KNEE_RAISE": "LEG_RAISE", + "SWISS_BALL_LEG_RAISE": "LEG_RAISE", + "SWISS_BALL_REVERSE_HIP_RAISE": "LEG_RAISE", + "V_RAISE": "LEG_RAISE", + "WINDSHIELD_WIPER_LEG_RAISE": "LEG_RAISE", + "FLUTTER_KICKS": "LEG_RAISE", + "SCISSOR_KICKS": "LEG_RAISE", + "HANGING_STRAIGHT_LEG_RAISE": "LEG_RAISE", + "LYING_SIDE_LEG_RAISE": "LEG_RAISE", + # ========================================================================= + # LEG_CURL Category (Hamstrings) + # ========================================================================= + "BALL_DIG": "LEG_CURL", + "BAND_GOOD_MORNING": "LEG_CURL", + "BARBELL_GOOD_MORNING": "LEG_CURL", + "BARBELL_STRAIGHT_LEG_GOOD_MORNING": "LEG_CURL", + "GOOD_MORNING": "LEG_CURL", + "WEIGHTED_GOOD_MORNING": "LEG_CURL", + "LEG_CURL": "LEG_CURL", + "WEIGHTED_LEG_CURL": "LEG_CURL", + "LYING_LEG_CURL": "LEG_CURL", + "LYING_LEG_CURL_EXERCISE": "LEG_CURL", + "NORDIC_HAMSTRING_CURL": "LEG_CURL", + "SEATED_BARBELL_GOOD_MORNING": "LEG_CURL", + "SEATED_LEG_CURL": "LEG_CURL", + "SINGLE_LEG_BARBELL_GOOD_MORNING": "LEG_CURL", + "SINGLE_LEG_SLIDING_LEG_CURL": "LEG_CURL", + "SLIDING_LEG_CURL": "LEG_CURL", + "SPLIT_BARBELL_GOOD_MORNING": "LEG_CURL", + "SPLIT_STANCE_EXTENSION": "LEG_CURL", + "STANDING_LEG_CURL": "LEG_CURL", + "STAGGERED_STANCE_GOOD_MORNING": "LEG_CURL", + "SWISS_BALL_HIP_RAISE_AND_LEG_CURL": "LEG_CURL", + "ZERCHER_GOOD_MORNING": "LEG_CURL", + 
"HAMSTRING_CURL": "LEG_CURL", + "PRONE_LEG_CURL": "LEG_CURL", + "GLUTE_HAM_RAISE": "LEG_CURL", + # ========================================================================= + # CALF_RAISE Category + # ========================================================================= + "BARBELL_CALF_RAISE": "CALF_RAISE", + "CALF_PRESS_ON_LEG_PRESS": "CALF_RAISE", + "CALF_RAISE": "CALF_RAISE", + "WEIGHTED_CALF_RAISE": "CALF_RAISE", + "DONKEY_CALF_RAISE": "CALF_RAISE", + "DUMBBELL_CALF_RAISE": "CALF_RAISE", + "DUMBBELL_SEATED_CALF_RAISE": "CALF_RAISE", + "DUMBBELL_SINGLE_LEG_CALF_RAISE": "CALF_RAISE", + "LEG_PRESS_CALF_RAISE": "CALF_RAISE", + "SEATED_CALF_RAISE": "CALF_RAISE", + "WEIGHTED_SEATED_CALF_RAISE": "CALF_RAISE", + "SEATED_DUMBBELL_TOE_RAISE": "CALF_RAISE", + "SINGLE_LEG_BENT_KNEE_CALF_RAISE": "CALF_RAISE", + "SINGLE_LEG_CALF_RAISE": "CALF_RAISE", + "SINGLE_LEG_DECLINE_PUSH_UP": "CALF_RAISE", + "SINGLE_LEG_DONKEY_CALF_RAISE": "CALF_RAISE", + "SINGLE_LEG_HIP_RAISE_WITH_KNEE_HOLD": "CALF_RAISE", + "SINGLE_LEG_STANDING_CALF_RAISE": "CALF_RAISE", + "SMITH_MACHINE_CALF_RAISE": "CALF_RAISE", + "STANDING_CALF_RAISE": "CALF_RAISE", + "WEIGHTED_STANDING_CALF_RAISE": "CALF_RAISE", + "STANDING_DUMBBELL_CALF_RAISE": "CALF_RAISE", + "TIBIALIS_RAISE": "CALF_RAISE", + # ========================================================================= + # HIP_RAISE Category (Glutes) + # ========================================================================= + "BARBELL_HIP_THRUST": "HIP_RAISE", + "BARBELL_HIP_THRUST_ON_FLOOR": "HIP_RAISE", + "BARBELL_HIP_THRUST_WITH_BENCH": "HIP_RAISE", + "BENT_KNEE_SWISS_BALL_REVERSE_HIP_RAISE": "HIP_RAISE", + "BANDED_GLUTE_BRIDGE": "HIP_RAISE", + "BODY_WEIGHT_GLUTE_BRIDGE": "HIP_RAISE", + "BRIDGE": "HIP_RAISE", + "WEIGHTED_BRIDGE": "HIP_RAISE", + "DUMBBELL_SINGLE_LEG_GLUTE_BRIDGE": "HIP_RAISE", + "ELEVATED_SINGLE_LEG_HIP_THRUST": "HIP_RAISE", + "FLOOR_GLUTE_HAM_RAISE": "HIP_RAISE", + "GLUTE_BRIDGE": "HIP_RAISE", + "WEIGHTED_GLUTE_BRIDGE": "HIP_RAISE", + 
"GLUTE_BRIDGE_HOLD": "HIP_RAISE", + "GLUTE_BRIDGE_MARCH": "HIP_RAISE", + "GLUTE_BRIDGE_ON_BENCH": "HIP_RAISE", + "GLUTE_BRIDGE_WITH_LEG_LIFT": "HIP_RAISE", + "HIP_RAISE": "HIP_RAISE", + "WEIGHTED_HIP_RAISE": "HIP_RAISE", + "HIP_RAISE_WITH_FEET_ON_SWISS_BALL": "HIP_RAISE", + "HIP_RAISE_WITH_HEAD_ON_SWISS_BALL": "HIP_RAISE", + "HIP_RAISE_WITH_KNEES_SQUEEZED": "HIP_RAISE", + "HIP_THRUST": "HIP_RAISE", + "WEIGHTED_HIP_THRUST": "HIP_RAISE", + "HIP_THRUST_AND_LEG_RAISE": "HIP_RAISE", + "INCLINE_REAR_LEG_EXTENSION": "HIP_RAISE", + "KETTLEBELL_SWING": "HIP_RAISE", + "MARCHING_HIP_RAISE": "HIP_RAISE", + "WEIGHTED_MARCHING_HIP_RAISE": "HIP_RAISE", + "MARCHING_HIP_RAISE_WITH_FEET_ON_A_SWISS_BALL": "HIP_RAISE", + "SINGLE_LEG_ELEVATED_FEET_HIP_RAISE": "HIP_RAISE", + "SINGLE_LEG_GLUTE_BRIDGE": "HIP_RAISE", + "SINGLE_LEG_HIP_THRUST": "HIP_RAISE", + "SINGLE_LEG_SWISS_BALL_HIP_RAISE_AND_LEG_CURL": "HIP_RAISE", + "SWISS_BALL_HIP_RAISE": "HIP_RAISE", + "FROG_PUMP": "HIP_RAISE", + "CABLE_PULL_THROUGH": "HIP_RAISE", + # ========================================================================= + # HIP_STABILITY Category + # ========================================================================= + "BAND_SIDE_LYING_LEG_RAISE": "HIP_STABILITY", + "CLAMSHELL": "HIP_STABILITY", + "DEAD_BUG_HIP_STABILITY": "HIP_STABILITY", + "EXTERNAL_HIP_RAISE": "HIP_STABILITY", + "HIP_CIRCLES": "HIP_STABILITY", + "HIP_STABILITY": "HIP_STABILITY", + "WEIGHTED_HIP_STABILITY": "HIP_STABILITY", + "INNER_THIGH_LIFT": "HIP_STABILITY", + "LATERAL_WALKS_WITH_BAND_AT_ANKLES": "HIP_STABILITY", + "LYING_HIP_ABDUCTION": "HIP_STABILITY", + "LYING_SIDE_LEG_RAISE_HIP_STABILITY": "HIP_STABILITY", + "PRETZEL_SIDE_KICK": "HIP_STABILITY", + "PRONE_HIP_INTERNAL_ROTATION": "HIP_STABILITY", + "QUADRUPED": "HIP_STABILITY", + "QUADRUPED_HIP_EXTENSION": "HIP_STABILITY", + "QUADRUPED_WITH_LEG_LIFT": "HIP_STABILITY", + "SIDE_LYING_LEG_RAISE": "HIP_STABILITY", + "STANDING_HIP_ABDUCTION": "HIP_STABILITY", + 
"SUPINE_HIP_INTERNAL_ROTATION": "HIP_STABILITY", + "BANDED_CLAMSHELL": "HIP_STABILITY", + "BANDED_LATERAL_WALK": "HIP_STABILITY", + "BANDED_MONSTER_WALK": "HIP_STABILITY", + "CABLE_HIP_ABDUCTION": "HIP_STABILITY", + "CABLE_HIP_ADDUCTION": "HIP_STABILITY", + "HIP_ABDUCTION_MACHINE": "HIP_STABILITY", + "HIP_ADDUCTION_MACHINE": "HIP_STABILITY", + # ========================================================================= + # FLYE Category + # ========================================================================= + "CABLE_CROSSOVER": "FLYE", + "CABLE_FLY": "FLYE", + "DECLINE_DUMBBELL_FLYE": "FLYE", + "DUMBBELL_FLYE": "FLYE", + "FLYE": "FLYE", + "WEIGHTED_FLYE": "FLYE", + "INCLINE_DUMBBELL_FLYE": "FLYE", + "INCLINE_REVERSE_FLY": "FLYE", + "KETTLEBELL_FLYE": "FLYE", + "KNEELING_REAR_FLY": "FLYE", + "MACHINE_FLY": "FLYE", + "PEC_DECK": "FLYE", + "REVERSE_FLY": "FLYE", + "SINGLE_ARM_STANDING_CABLE_REVERSE_FLY": "FLYE", + "SWISS_BALL_DUMBBELL_FLYE": "FLYE", + "CHEST_FLY": "FLYE", + "CABLE_CHEST_FLY": "FLYE", + "HIGH_CABLE_FLY": "FLYE", + "LOW_CABLE_FLY": "FLYE", + # ========================================================================= + # SHRUG Category + # ========================================================================= + "BARBELL_JUMP_SHRUG": "SHRUG", + "BARBELL_SHRUG": "SHRUG", + "BARBELL_UPRIGHT_ROW": "SHRUG", + "BEHIND_THE_BACK_BARBELL_SHRUG": "SHRUG", + "BEHIND_THE_BACK_SMITH_MACHINE_SHRUG": "SHRUG", + "CABLE_SHRUG": "SHRUG", + "DUMBBELL_JUMP_SHRUG": "SHRUG", + "DUMBBELL_SHRUG": "SHRUG", + "DUMBBELL_UPRIGHT_ROW": "SHRUG", + "INCLINE_DUMBBELL_SHRUG": "SHRUG", + "OVERHEAD_BARBELL_SHRUG": "SHRUG", + "OVERHEAD_DUMBBELL_SHRUG": "SHRUG", + "SCAPTION_AND_SHRUG_SHRUG": "SHRUG", + "SCAPULAR_RETRACTION": "SHRUG", + "SERRATUS_CHAIR_SHRUG": "SHRUG", + "SERRATUS_SHRUG": "SHRUG", + "SHRUG": "SHRUG", + "WEIGHTED_SHRUG": "SHRUG", + "WIDE_GRIP_JUMP_SHRUG": "SHRUG", + "TRAP_BAR_SHRUG": "SHRUG", + # 
========================================================================= + # HYPEREXTENSION Category (Back Extension) + # ========================================================================= + "BACK_EXTENSION_WITH_OPPOSITE_ARM_AND_LEG_REACH": "HYPEREXTENSION", + "BASE_ROTATIONS": "HYPEREXTENSION", + "BENT_KNEE_REVERSE_HYPEREXTENSION": "HYPEREXTENSION", + "DOUBLE_LEG_HIP_EXTENSION_AND_CROSS": "HYPEREXTENSION", + "GLUTE_FOCUSED_BACK_EXTENSION": "HYPEREXTENSION", + "HYPEREXTENSION": "HYPEREXTENSION", + "WEIGHTED_HYPEREXTENSION": "HYPEREXTENSION", + "PRONE_COBRA": "HYPEREXTENSION", + "PRONE_FLUTTER_KICKS": "HYPEREXTENSION", + "REVERSE_HYPEREXTENSION": "HYPEREXTENSION", + "WEIGHTED_REVERSE_HYPEREXTENSION": "HYPEREXTENSION", + "SUPERMAN_FROM_FLOOR": "HYPEREXTENSION", + "SWISS_BALL_BACK_EXTENSION_WITH_ROTATION": "HYPEREXTENSION", + "SWISS_BALL_HYPEREXTENSION": "HYPEREXTENSION", + "SWISS_BALL_OPPOSITE_ARM_AND_LEG_LIFT": "HYPEREXTENSION", + "BACK_EXTENSION": "HYPEREXTENSION", + "WEIGHTED_BACK_EXTENSION": "HYPEREXTENSION", + "ROMAN_CHAIR_BACK_EXTENSION": "HYPEREXTENSION", + "GHD_BACK_EXTENSION": "HYPEREXTENSION", + # ========================================================================= + # OLYMPIC_LIFT Category + # ========================================================================= + "ALTERNATING_HANG_PULL": "OLYMPIC_LIFT", + "ALTERNATING_SINGLE_ARM_HANG_CLEAN": "OLYMPIC_LIFT", + "BARBELL_CLEAN_AND_JERK": "OLYMPIC_LIFT", + "BARBELL_HANG_CLEAN": "OLYMPIC_LIFT", + "BARBELL_HANG_PULL": "OLYMPIC_LIFT", + "BARBELL_HANG_SNATCH": "OLYMPIC_LIFT", + "BARBELL_HIGH_PULL": "OLYMPIC_LIFT", + "BARBELL_MUSCLE_SNATCH": "OLYMPIC_LIFT", + "BARBELL_POWER_CLEAN": "OLYMPIC_LIFT", + "BARBELL_POWER_SNATCH": "OLYMPIC_LIFT", + "BARBELL_SNATCH": "OLYMPIC_LIFT", + "BARBELL_SPLIT_JERK": "OLYMPIC_LIFT", + "CLEAN": "OLYMPIC_LIFT", + "CLEAN_AND_JERK": "OLYMPIC_LIFT", + "CLEAN_AND_PRESS": "OLYMPIC_LIFT", + "DUMBBELL_CLEAN": "OLYMPIC_LIFT", + "DUMBBELL_CLEAN_AND_JERK": "OLYMPIC_LIFT", + 
"DUMBBELL_HANG_CLEAN": "OLYMPIC_LIFT", + "DUMBBELL_HANG_PULL": "OLYMPIC_LIFT", + "DUMBBELL_HANG_SNATCH": "OLYMPIC_LIFT", + "DUMBBELL_HIGH_PULL": "OLYMPIC_LIFT", + "DUMBBELL_POWER_CLEAN": "OLYMPIC_LIFT", + "DUMBBELL_SNATCH": "OLYMPIC_LIFT", + "DUMBBELL_SPLIT_ALTERNATE_FOOT_SNATCH": "OLYMPIC_LIFT", + "DUMBBELL_SPLIT_JERK": "OLYMPIC_LIFT", + "HANG_CLEAN": "OLYMPIC_LIFT", + "HANG_PULL": "OLYMPIC_LIFT", + "HANG_SNATCH": "OLYMPIC_LIFT", + "HIGH_PULL": "OLYMPIC_LIFT", + "KETTLEBELL_HANG_PULL": "OLYMPIC_LIFT", + "KETTLEBELL_SNATCH": "OLYMPIC_LIFT", + "KETTLEBELL_SWING_SNATCH": "OLYMPIC_LIFT", + "MED_BALL_CLEAN": "OLYMPIC_LIFT", + "MUSCLE_CLEAN": "OLYMPIC_LIFT", + "MUSCLE_SNATCH": "OLYMPIC_LIFT", + "ONE_HAND_DUMBBELL_SPLIT_SNATCH": "OLYMPIC_LIFT", + "OVERHEAD_SQUAT_TO_SPLIT_SNATCH": "OLYMPIC_LIFT", + "POWER_CLEAN": "OLYMPIC_LIFT", + "POWER_JERK": "OLYMPIC_LIFT", + "POWER_SNATCH": "OLYMPIC_LIFT", + "SINGLE_ARM_DUMBBELL_SNATCH": "OLYMPIC_LIFT", + "SINGLE_ARM_HANG_SNATCH": "OLYMPIC_LIFT", + "SINGLE_ARM_KETTLEBELL_SNATCH": "OLYMPIC_LIFT", + "SNATCH": "OLYMPIC_LIFT", + "SPLIT_JERK": "OLYMPIC_LIFT", + "SQUAT_CLEAN": "OLYMPIC_LIFT", + "SQUAT_SNATCH": "OLYMPIC_LIFT", + "THRUSTER": "OLYMPIC_LIFT", + "JERK": "OLYMPIC_LIFT", + "PUSH_JERK": "OLYMPIC_LIFT", + "CLUSTER": "OLYMPIC_LIFT", + # ========================================================================= + # PLYO Category (Plyometrics) + # ========================================================================= + "ALTERNATING_JUMP_LUNGE": "PLYO", + "BARBELL_JUMP_SQUAT": "PLYO", + "BOUNDING": "PLYO", + "BOX_JUMP": "PLYO", + "WEIGHTED_BOX_JUMP": "PLYO", + "BOX_JUMP_OVERS": "PLYO", + "BROAD_JUMP": "PLYO", + "BURPEE": "PLYO", + "BURPEE_BOX_JUMP": "PLYO", + "BURPEE_BOX_JUMP_OVER": "PLYO", + "BURPEE_OVER_BAR": "PLYO", + "DEPTH_JUMP": "PLYO", + "DUMBBELL_JUMP_SQUAT": "PLYO", + "DUMBBELL_SPLIT_JUMP": "PLYO", + "FRONT_KNEE_STRIKE": "PLYO", + "HIGH_BOX_JUMP": "PLYO", + "ISOMETRIC_EXPLOSIVE_BODY_WEIGHT_JUMP_SQUAT": "PLYO", + "JUMP_SQUAT": 
"PLYO", + "WEIGHTED_JUMP_SQUAT": "PLYO", + "KNEE_TUCK_JUMP": "PLYO", + "LATERAL_BOX_JUMP": "PLYO", + "LATERAL_JUMP": "PLYO", + "WEIGHTED_LATERAL_JUMP": "PLYO", + "LATERAL_LEAP_AND_HOP": "PLYO", + "LATERAL_PLYO_SQUATS": "PLYO", + "LATERAL_SLIDE": "PLYO", + "LONG_JUMP": "PLYO", + "MEDICINE_BALL_SLAM_PLYO": "PLYO", + "PLYOMETRIC_PUSH_UP": "PLYO", + "POWER_DROP": "PLYO", + "PUSH_UP_JACK": "PLYO", + "SINGLE_ARM_MED_BALL_SLAM": "PLYO", + "SINGLE_LEG_BOX_JUMP": "PLYO", + "SINGLE_LEG_DEPTH_JUMP": "PLYO", + "SINGLE_LEG_JUMP": "PLYO", + "SINGLE_LEG_LATERAL_HOP": "PLYO", + "SINGLE_LEG_LATERAL_TO_MEDIAL_HOP": "PLYO", + "SINGLE_LEG_TRIPLE_HOP": "PLYO", + "SKATER_JUMP": "PLYO", + "SQUAT_JUMP": "PLYO", + "SQUAT_JUMP_TO_BOX": "PLYO", + "STANDING_BROAD_JUMP": "PLYO", + "STANDING_LONG_JUMP": "PLYO", + "TRIPLE_JUMP": "PLYO", + "TUCK_JUMP": "PLYO", + "WEIGHTED_TUCK_JUMP": "PLYO", + "VERTICAL_JUMP": "PLYO", + "PLYO_PUSH_UP": "PLYO", + "STAR_JUMP": "PLYO", + "SPLIT_JUMP": "PLYO", + "POGO_HOP": "PLYO", + "ANKLE_HOP": "PLYO", + # ========================================================================= + # CHOP Category + # ========================================================================= + "CABLE_PULL_THROUGH_CHOP": "CHOP", + "CABLE_ROTATIONAL_LIFT": "CHOP", + "CABLE_WOODCHOP_CHOP": "CHOP", + "CHOP": "CHOP", + "WEIGHTED_CHOP": "CHOP", + "CROSS_CHOP_TO_KNEE": "CHOP", + "DUMBBELL_CHOP": "CHOP", + "HALF_KNEELING_ROTATION": "CHOP", + "HALF_KNEELING_ROTATIONAL_CHOP": "CHOP", + "HALF_KNEELING_ROTATIONAL_REVERSE_CHOP": "CHOP", + "HALF_KNEELING_STABILITY_CHOP": "CHOP", + "HALF_KNEELING_STABILITY_REVERSE_CHOP": "CHOP", + "KNEELING_ROTATIONAL_CHOP": "CHOP", + "KNEELING_ROTATIONAL_REVERSE_CHOP": "CHOP", + "KNEELING_STABILITY_CHOP": "CHOP", + "KNEELING_WOODCHOPPER": "CHOP", + "MEDICINE_BALL_WOODCHOPS": "CHOP", + "POWER_SQUAT_CHOPS": "CHOP", + "STANDING_ROTATIONAL_CHOP_CHOP": "CHOP", + "STANDING_SPLIT_ROTATIONAL_CHOP": "CHOP", + "STANDING_SPLIT_ROTATIONAL_REVERSE_CHOP": "CHOP", + 
"STANDING_STABILITY_REVERSE_CHOP": "CHOP", + "WOOD_CHOP": "CHOP", + "WEIGHTED_WOOD_CHOP": "CHOP", + # ========================================================================= + # CARRY Category + # ========================================================================= + "BAR_HOLDS": "CARRY", + "CARRY": "CARRY", + "WEIGHTED_CARRY": "CARRY", + "FARMERS_CARRY": "CARRY", + "FARMERS_WALK_CARRY": "CARRY", + "FARMERS_WALK_ON_TOES": "CARRY", + "HEX_DUMBBELL_HOLD": "CARRY", + "KETTLEBELL_CARRY": "CARRY", + "OVERHEAD_CARRY": "CARRY", + "OVERHEAD_FARMERS_WALK": "CARRY", + "OVERHEAD_WALK": "CARRY", + "SANDBAG_CARRY": "CARRY", + "SLED_DRAG": "CARRY", + "SUITCASE_CARRY_CARRY": "CARRY", + "TRAP_BAR_CARRY": "CARRY", + "YOKE_CARRY": "CARRY", + "RACK_CARRY": "CARRY", + "ZERCHER_CARRY": "CARRY", + "FRONT_RACK_CARRY": "CARRY", + "WAITER_CARRY": "CARRY", + "CROSS_BODY_CARRY": "CARRY", + # ========================================================================= + # HIP_SWING Category + # ========================================================================= + "HIP_SWING": "HIP_SWING", + "WEIGHTED_HIP_SWING": "HIP_SWING", + "INSIDE_OUT": "HIP_SWING", + "SINGLE_ARM_SWING": "HIP_SWING", + "SINGLE_LEG_HIP_SWING": "HIP_SWING", + "STEP_OUT_SWING": "HIP_SWING", + "SWING": "HIP_SWING", + "KETTLEBELL_HIP_SWING": "HIP_SWING", + "AMERICAN_KETTLEBELL_SWING": "HIP_SWING", + "RUSSIAN_KETTLEBELL_SWING": "HIP_SWING", + "ALTERNATING_KETTLEBELL_SWING": "HIP_SWING", + "SINGLE_ARM_KETTLEBELL_SWING": "HIP_SWING", + # ========================================================================= + # SHOULDER_STABILITY Category + # ========================================================================= + "90_DEGREE_CABLE_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "BAND_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "BAND_INTERNAL_ROTATION": "SHOULDER_STABILITY", + "BENT_ARM_LATERAL_RAISE_AND_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "CABLE_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + 
"DUMBBELL_FACE_PULL_WITH_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "FLOOR_I_RAISE": "SHOULDER_STABILITY", + "FLOOR_T_RAISE": "SHOULDER_STABILITY", + "FLOOR_Y_RAISE": "SHOULDER_STABILITY", + "INCLINE_I_RAISE": "SHOULDER_STABILITY", + "INCLINE_L_RAISE": "SHOULDER_STABILITY", + "INCLINE_T_RAISE": "SHOULDER_STABILITY", + "INCLINE_W_RAISE": "SHOULDER_STABILITY", + "INCLINE_Y_RAISE": "SHOULDER_STABILITY", + "LYING_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "LYING_FLOOR_Y_RAISE": "SHOULDER_STABILITY", + "PLANK_I_RAISE": "SHOULDER_STABILITY", + "PRONE_I_RAISE": "SHOULDER_STABILITY", + "PRONE_L_RAISE": "SHOULDER_STABILITY", + "PRONE_T_RAISE": "SHOULDER_STABILITY", + "PRONE_W_RAISE": "SHOULDER_STABILITY", + "PRONE_Y_RAISE": "SHOULDER_STABILITY", + "QUADRUPED_I_RAISE": "SHOULDER_STABILITY", + "QUADRUPED_Y_RAISE": "SHOULDER_STABILITY", + "SEATED_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "SHOULDER_STABILITY": "SHOULDER_STABILITY", + "WEIGHTED_SHOULDER_STABILITY": "SHOULDER_STABILITY", + "SIDE_LYING_EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "STANDING_L_RAISE": "SHOULDER_STABILITY", + "SWISS_BALL_I_RAISE": "SHOULDER_STABILITY", + "SWISS_BALL_T_RAISE": "SHOULDER_STABILITY", + "SWISS_BALL_W_RAISE": "SHOULDER_STABILITY", + "SWISS_BALL_Y_RAISE": "SHOULDER_STABILITY", + "EXTERNAL_ROTATION": "SHOULDER_STABILITY", + "INTERNAL_ROTATION": "SHOULDER_STABILITY", + "CUBAN_ROTATION": "SHOULDER_STABILITY", + "BAND_PULL_APART": "SHOULDER_STABILITY", + "FACE_PULL_STABILITY": "SHOULDER_STABILITY", + # ========================================================================= + # TOTAL_BODY Category + # ========================================================================= + "BURPEE_TOTAL_BODY": "TOTAL_BODY", + "WEIGHTED_BURPEE": "TOTAL_BODY", + "BARBELL_BURPEE": "TOTAL_BODY", + "BOX_JUMP_BURPEE": "TOTAL_BODY", + "DUMBBELL_BURPEE": "TOTAL_BODY", + "JUMPING_JACK": "TOTAL_BODY", + "MAN_MAKER": "TOTAL_BODY", + "MODIFIED_BURPEE": "TOTAL_BODY", + "PULL_BURPEE": "TOTAL_BODY", + 
"TURKISH_GET_UP_TOTAL_BODY": "TOTAL_BODY", + "WALL_BALL_BURPEE": "TOTAL_BODY", + "HIGH_KNEES": "TOTAL_BODY", + "BUTT_KICKS": "TOTAL_BODY", + "JUMPING_JACKS": "TOTAL_BODY", + "DEVIL_PRESS": "TOTAL_BODY", + "BURPEE_TO_BOX_JUMP": "TOTAL_BODY", + "BATTLE_ROPE": "TOTAL_BODY", + "BATTLE_ROPE_SLAM": "TOTAL_BODY", + "BATTLE_ROPE_WAVE": "TOTAL_BODY", + # ========================================================================= + # WARM_UP Category + # ========================================================================= + "ARM_SWINGS": "WARM_UP", + "LEG_SWINGS": "WARM_UP", + "HIP_CIRCLES_WARM_UP": "WARM_UP", + "TORSO_TWIST": "WARM_UP", + "WALKING_HIGH_KNEES": "WARM_UP", + "WALKING_QUAD_STRETCH": "WARM_UP", + "WALKING_TOE_TOUCH": "WARM_UP", + "WORLD_GREATEST_STRETCH": "WARM_UP", + "INCHWORM_WARM_UP": "WARM_UP", + "CAT_COW": "WARM_UP", + "DOWNWARD_DOG": "WARM_UP", + "HIP_OPENER": "WARM_UP", + "THORACIC_ROTATION": "WARM_UP", + "ANKLE_CIRCLES": "WARM_UP", + "WRIST_CIRCLES": "WARM_UP", + "NECK_CIRCLES": "WARM_UP", + "SHOULDER_CIRCLES": "WARM_UP", + "DYNAMIC_STRETCH": "WARM_UP", + "WARM_UP": "WARM_UP", + # ========================================================================= + # CARDIO Category + # ========================================================================= + "BIKE": "CARDIO", + "RUN": "CARDIO", + "ROWING": "CARDIO", + "ELLIPTICAL": "CARDIO", + "STAIR_CLIMBER": "CARDIO", + "SKI_ERG": "CARDIO", + "ASSAULT_BIKE": "CARDIO", + "AIR_BIKE": "CARDIO", + "SPIN_BIKE": "CARDIO", + "TREADMILL": "CARDIO", + "STATIONARY_BIKE": "CARDIO", + "RECUMBENT_BIKE": "CARDIO", + "JUMP_ROPE": "CARDIO", + "DOUBLE_UNDERS": "CARDIO", + "SINGLE_UNDERS": "CARDIO", + "SPRINTS": "CARDIO", + "CARDIO": "CARDIO", + # ========================================================================= + # SUSPENSION Category (TRX) + # ========================================================================= + "TRX_BICEPS_CURL": "SUSPENSION", + "TRX_CHEST_PRESS": "SUSPENSION", + "TRX_FACE_PULL": 
"SUSPENSION", + "TRX_HAMSTRING_CURL": "SUSPENSION", + "TRX_HIP_PRESS": "SUSPENSION", + "TRX_MOUNTAIN_CLIMBER": "SUSPENSION", + "TRX_PIKE": "SUSPENSION", + "TRX_PLANK": "SUSPENSION", + "TRX_PULL_UP": "SUSPENSION", + "TRX_PUSH_UP": "SUSPENSION", + "TRX_ROW": "SUSPENSION", + "TRX_SINGLE_LEG_SQUAT": "SUSPENSION", + "TRX_SQUAT": "SUSPENSION", + "TRX_TRICEPS_EXTENSION": "SUSPENSION", + "TRX_Y_FLY": "SUSPENSION", + "SUSPENSION_TRAINING": "SUSPENSION", + # ========================================================================= + # OTHER / MISCELLANEOUS + # ========================================================================= + "REST": "REST", + "RECOVERY": "RECOVERY", + "STRETCH": "FLEXIBILITY", + "FOAM_ROLL": "FLEXIBILITY", + "MOBILITY": "FLEXIBILITY", + "YOGA": "FLEXIBILITY", + "PILATES": "FLEXIBILITY", +} + +# ============================================================================= +# COMMON ALIASES +# ============================================================================= +# Maps common user terms to their preferred Garmin exercise name +# Used to resolve ambiguous terms to the most common variant +# ============================================================================= + +ALIASES: Dict[str, str] = { + # ========================================================================= + # Ambiguous terms → Most common variant (usually barbell) + # ========================================================================= + "bench press": "BARBELL_BENCH_PRESS", + "bench": "BARBELL_BENCH_PRESS", + "deadlift": "BARBELL_DEADLIFT", + "squat": "BARBELL_BACK_SQUAT", + "back squat": "BARBELL_BACK_SQUAT", + "front squat": "BARBELL_FRONT_SQUAT", + "overhead press": "BARBELL_SHOULDER_PRESS", + "shoulder press": "BARBELL_SHOULDER_PRESS", + "military press": "BARBELL_SHOULDER_PRESS", + "press": "BARBELL_SHOULDER_PRESS", + "row": "BARBELL_ROW", + "bent over row": "BENT_OVER_ROW_WITH_BARBELL", + "curl": "BARBELL_BICEPS_CURL", + "bicep curl": "BARBELL_BICEPS_CURL", + 
"biceps curl": "BARBELL_BICEPS_CURL", + "tricep extension": "TRICEPS_EXTENSION", + "lunge": "BARBELL_LUNGE", + "calf raise": "STANDING_CALF_RAISE", + "shrug": "BARBELL_SHRUG", + "upright row": "BARBELL_UPRIGHT_ROW", + "hip thrust": "BARBELL_HIP_THRUST", + "glute bridge": "GLUTE_BRIDGE", + # ========================================================================= + # Common abbreviations and shorthand + # ========================================================================= + "db": "DUMBBELL", + "bb": "BARBELL", + "kb": "KETTLEBELL", + "ez": "EZ_BAR", + "rdl": "ROMANIAN_DEADLIFT", + "sldl": "STRAIGHT_LEG_DEADLIFT", + "ohp": "OVERHEAD_PRESS", + "bp": "BARBELL_BENCH_PRESS", + "dl": "BARBELL_DEADLIFT", + "sq": "BARBELL_BACK_SQUAT", + "cgbp": "CLOSE_GRIP_BARBELL_BENCH_PRESS", + "jm press": "JM_PRESS", + "ghd": "GHD_SIT_UP", + "ghr": "GLUTE_HAM_RAISE", + # ========================================================================= + # Common exercise names (friendly format) + # ========================================================================= + "lat pulldown": "LAT_PULLDOWN", + "pulldown": "LAT_PULLDOWN", + "pull up": "PULL_UP", + "pullup": "PULL_UP", + "pull-up": "PULL_UP", + "chin up": "CHIN_UP", + "chinup": "CHIN_UP", + "chin-up": "CHIN_UP", + "push up": "PUSH_UP", + "pushup": "PUSH_UP", + "push-up": "PUSH_UP", + "dip": "TRICEPS_DIP", + "dips": "TRICEPS_DIP", + "skull crusher": "SKULL_CRUSHER", + "skullcrusher": "SKULL_CRUSHER", + "skull crushers": "SKULL_CRUSHER", + "tricep pushdown": "TRICEPS_PUSHDOWN", + "pushdown": "TRICEPS_PUSHDOWN", + "cable pushdown": "TRICEPS_PUSHDOWN", + "hammer curl": "DUMBBELL_HAMMER_CURL", + "preacher curl": "EZ_BAR_PREACHER_CURL", + "concentration curl": "ONE_ARM_CONCENTRATION_CURL", + "incline curl": "INCLINE_DUMBBELL_BICEPS_CURL", + "cable curl": "CABLE_BICEPS_CURL", + "face pull": "FACE_PULL", + "facepull": "FACE_PULL", + "face pulls": "FACE_PULL", + "lateral raise": "DUMBBELL_LATERAL_RAISE", + "side raise": 
"DUMBBELL_LATERAL_RAISE", + "side lateral raise": "DUMBBELL_LATERAL_RAISE", + "front raise": "DUMBBELL_FRONT_RAISE", + "rear delt fly": "REAR_DELT_FLY", + "reverse fly": "REVERSE_FLY", + "cable fly": "CABLE_FLY", + "chest fly": "DUMBBELL_FLYE", + "pec deck": "PEC_DECK", + "cable crossover": "CABLE_CROSSOVER", + "seated row": "SEATED_CABLE_ROW", + "cable row": "SEATED_CABLE_ROW", + "t bar row": "T_BAR_ROW", + "t-bar row": "T_BAR_ROW", + "pendlay row": "PENDLAY_ROW", + "leg press": "LEG_PRESS", + "leg curl": "LYING_LEG_CURL", + "hamstring curl": "LYING_LEG_CURL", + "leg extension": "LEG_EXTENSIONS", + "quad extension": "LEG_EXTENSIONS", + "hip abduction": "HIP_ABDUCTION_MACHINE", + "hip adduction": "HIP_ADDUCTION_MACHINE", + "goblet squat": "GOBLET_SQUAT", + "sumo squat": "SUMO_SQUAT", + "hack squat": "BARBELL_HACK_SQUAT", + "box squat": "BARBELL_BOX_SQUAT", + "bulgarian split squat": "DUMBBELL_BULGARIAN_SPLIT_SQUAT", + "split squat": "DUMBBELL_SPLIT_SQUAT", + "step up": "DUMBBELL_STEP_UP", + "step-up": "DUMBBELL_STEP_UP", + "walking lunge": "WALKING_DUMBBELL_LUNGE", + "reverse lunge": "DUMBBELL_REVERSE_LUNGE", + "forward lunge": "DUMBBELL_LUNGE", + "side lunge": "SIDE_LUNGE", + "lateral lunge": "LATERAL_LUNGE", + "good morning": "BARBELL_GOOD_MORNING", + "romanian deadlift": "ROMANIAN_DEADLIFT", + "stiff leg deadlift": "STRAIGHT_LEG_DEADLIFT", + "sumo deadlift": "SUMO_DEADLIFT", + "trap bar deadlift": "TRAP_BAR_DEADLIFT", + "rack pull": "RACK_PULL", + "clean": "BARBELL_POWER_CLEAN", + "power clean": "BARBELL_POWER_CLEAN", + "hang clean": "BARBELL_HANG_CLEAN", + "snatch": "BARBELL_SNATCH", + "power snatch": "BARBELL_POWER_SNATCH", + "clean and jerk": "BARBELL_CLEAN_AND_JERK", + "thruster": "THRUSTERS", + "thrusters": "THRUSTERS", + "wall ball": "WALL_BALL", + "kettlebell swing": "KETTLEBELL_SWING", + "kb swing": "KETTLEBELL_SWING", + "turkish get up": "TURKISH_GET_UP", + "tgu": "TURKISH_GET_UP", + "farmers walk": "FARMERS_WALK", + "farmers carry": "FARMERS_CARRY", + 
"suitcase carry": "SUITCASE_CARRY", + "plank": "PLANK", + "side plank": "SIDE_PLANK", + "mountain climber": "MOUNTAIN_CLIMBER", + "mountain climbers": "MOUNTAIN_CLIMBER", + "burpee": "BURPEE", + "burpees": "BURPEE", + "box jump": "BOX_JUMP", + "box jumps": "BOX_JUMP", + "jump squat": "JUMP_SQUAT", + "jump squats": "JUMP_SQUAT", + "tuck jump": "TUCK_JUMP", + "broad jump": "BROAD_JUMP", + "crunch": "CRUNCH", + "crunches": "CRUNCH", + "sit up": "SIT_UP", + "situp": "SIT_UP", + "sit-up": "SIT_UP", + "leg raise": "HANGING_LEG_RAISE", + "hanging leg raise": "HANGING_LEG_RAISE", + "russian twist": "RUSSIAN_TWIST", + "v up": "V_UP", + "v-up": "V_UP", + "bicycle crunch": "BICYCLE_CRUNCH", + "dead bug": "DEAD_BUG", + "bird dog": "BIRD_DOG", + "superman": "SUPERMAN", + "back extension": "BACK_EXTENSION", + "hyperextension": "HYPEREXTENSION", + "ab wheel": "AB_WHEEL_ROLLOUT", + "ab roller": "AB_WHEEL_ROLLOUT", + "pallof press": "PALLOF_PRESS", + "woodchop": "CABLE_WOODCHOP", + "wood chop": "CABLE_WOODCHOP", + "hollow hold": "HOLLOW_BODY_HOLD", + "hollow body": "HOLLOW_BODY_HOLD", + "arnold press": "ARNOLD_PRESS", + "incline press": "INCLINE_BARBELL_BENCH_PRESS", + "incline bench": "INCLINE_BARBELL_BENCH_PRESS", + "decline press": "DECLINE_DUMBBELL_BENCH_PRESS", + "decline bench": "DECLINE_DUMBBELL_BENCH_PRESS", + "floor press": "BARBELL_FLOOR_PRESS", + "close grip bench": "CLOSE_GRIP_BARBELL_BENCH_PRESS", + "close grip bench press": "CLOSE_GRIP_BARBELL_BENCH_PRESS", + "wide grip bench": "WIDE_GRIP_BARBELL_BENCH_PRESS", + "pause squat": "PAUSE_SQUAT", + "box step up": "BOX_STEP_SQUAT", + "pistol squat": "PISTOL_SQUAT", + "pistol": "PISTOL_SQUAT", + "clamshell": "CLAMSHELL", + "clam": "CLAMSHELL", + "hip circle": "HIP_CIRCLES", + "band walk": "BANDED_LATERAL_WALK", + "monster walk": "BANDED_MONSTER_WALK", + "glute kickback": "QUADRUPED_HIP_EXTENSION", + "donkey kick": "QUADRUPED_HIP_EXTENSION", + "fire hydrant": "FIRE_HYDRANT", + "jumping jack": "JUMPING_JACKS", + "jumping 
jacks": "JUMPING_JACKS", + "high knees": "HIGH_KNEES", + "butt kicks": "BUTT_KICKS", + "jump rope": "JUMP_ROPE", + "double under": "DOUBLE_UNDERS", + "double unders": "DOUBLE_UNDERS", + "battle rope": "BATTLE_ROPE", + "battle ropes": "BATTLE_ROPE", + "rowing": "ROWING", + "row machine": "ROWING", + "rower": "ROWING", + "bike": "BIKE", + "assault bike": "ASSAULT_BIKE", + "air bike": "AIR_BIKE", + "ski erg": "SKI_ERG", + "elliptical": "ELLIPTICAL", + "treadmill": "TREADMILL", + "stair climber": "STAIR_CLIMBER", + "rest": "REST", + "recovery": "RECOVERY", + "stretch": "STRETCH", + "foam roll": "FOAM_ROLL", +} + +# ============================================================================= +# EQUIPMENT PREFIXES +# ============================================================================= + +EQUIPMENT_PREFIXES: Set[str] = { + "BARBELL", + "DUMBBELL", + "KETTLEBELL", + "CABLE", + "MACHINE", + "BODYWEIGHT", + "BAND", + "MEDICINE_BALL", + "SWISS_BALL", + "EZ_BAR", + "TRAP_BAR", + "SMITH_MACHINE", + "LANDMINE", + "SUSPENSION", + "WEIGHTED", + "ALTERNATING", + "SINGLE_ARM", + "SINGLE_LEG", + "ONE_ARM", + "ONE_LEG", + "SEATED", + "STANDING", + "INCLINE", + "DECLINE", + "FLAT", + "REVERSE", + "WIDE_GRIP", + "CLOSE_GRIP", + "NEUTRAL_GRIP", +} + + +class ExerciseMatcher: + """Fuzzy matcher for Garmin exercise names. + + Converts user-friendly exercise names to Garmin's SCREAMING_SNAKE_CASE format + using a hybrid approach of alias lookup, token matching, and Levenshtein distance. + + Example: + >>> matcher = ExerciseMatcher() + >>> result = matcher.resolve("dumbbell bench press") + >>> result.name + 'DUMBBELL_BENCH_PRESS' + + >>> result = matcher.resolve("dumbell curl") # typo handled + >>> result.name + 'DUMBBELL_BICEPS_CURL' + + >>> results = matcher.search("squat", limit=5) + >>> [r.name for r in results] + ['BARBELL_SQUAT', 'GOBLET_SQUAT', 'FRONT_SQUAT', ...] + """ + + def __init__(self, threshold: float = 0.5) -> None: + """Initialize the exercise matcher. 
+ + Args: + threshold: Minimum confidence score (0-1) to return a match. + Default 0.5 balances precision and recall. + """ + self._threshold = threshold + self._exercises = EXERCISES + self._aliases = ALIASES + self._exercise_names = list(EXERCISES.keys()) + self._normalized_index: Dict[str, str] = {} + self._build_index() + + def _build_index(self) -> None: + """Build normalized index for fast lookup.""" + for name in self._exercise_names: + normalized = self._normalize(name) + self._normalized_index[normalized] = name + + @staticmethod + def _normalize(text: str) -> str: + """Normalize text for matching. + + Converts to lowercase, removes punctuation, replaces spaces with underscores. + """ + text = text.lower().strip() + text = re.sub(r"[^\w\s]", "", text) + text = re.sub(r"\s+", "_", text) + return text + + @staticmethod + def _tokenize(text: str) -> Set[str]: + """Extract tokens from text.""" + normalized = ExerciseMatcher._normalize(text) + return set(normalized.split("_")) + + @staticmethod + def _levenshtein_ratio(s1: str, s2: str) -> float: + """Calculate similarity ratio using SequenceMatcher.""" + return SequenceMatcher(None, s1, s2).ratio() + + def _score_candidate( + self, query_tokens: Set[str], candidate: str, query_normalized: str + ) -> float: + """Score a candidate exercise against the query. + + Uses a combination of: + 1. Exact normalized match + 2. Token overlap (Jaccard similarity) + 3. Fuzzy token matching (Levenshtein) + 4. 
Overall string similarity + """ + candidate_normalized = self._normalize(candidate) + candidate_tokens = self._tokenize(candidate) + + # Exact match after normalization + if query_normalized == candidate_normalized: + return 1.0 + + # Token overlap score (Jaccard) + intersection = query_tokens & candidate_tokens + union = query_tokens | candidate_tokens + token_score = len(intersection) / len(union) if union else 0 + + # Fuzzy token matching - find best match for each query token + fuzzy_scores = [] + for qt in query_tokens: + if qt in candidate_tokens: + fuzzy_scores.append(1.0) + else: + best = max( + (self._levenshtein_ratio(qt, ct) for ct in candidate_tokens), + default=0, + ) + fuzzy_scores.append(best) + fuzzy_score = sum(fuzzy_scores) / len(fuzzy_scores) if fuzzy_scores else 0 + + # Overall string similarity + string_score = self._levenshtein_ratio(query_normalized, candidate_normalized) + + # Weighted combination + # Token overlap is most important, then fuzzy tokens, then overall string + return (token_score * 0.4) + (fuzzy_score * 0.4) + (string_score * 0.2) + + def resolve( + self, query: str, equipment_hint: Optional[str] = None + ) -> Optional[MatchResult]: + """Find the best matching Garmin exercise name. + + Args: + query: User-provided exercise name (any format) + equipment_hint: Optional equipment to prefer (e.g., "dumbbell") + + Returns: + MatchResult if confidence >= threshold, else None + """ + if not query or not query.strip(): + return None + + query_normalized = self._normalize(query) + query_tokens = self._tokenize(query) + + # 1. Check exact match in index + if query_normalized in self._normalized_index: + name = self._normalized_index[query_normalized] + return MatchResult( + name=name, + category=self._exercises[name], + score=1.0, + ) + + # 2. 
Check aliases + query_lower = query.lower().strip() + if query_lower in self._aliases: + name = self._aliases[query_lower] + if name in self._exercises: + return MatchResult( + name=name, + category=self._exercises[name], + score=0.95, # High confidence for alias match + ) + + # 3. Score all candidates + candidates: List[Tuple[str, float]] = [] + for exercise_name in self._exercise_names: + score = self._score_candidate(query_tokens, exercise_name, query_normalized) + + # Apply equipment hint bonus + if equipment_hint: + hint_normalized = self._normalize(equipment_hint) + if hint_normalized.upper() in exercise_name: + score = min(1.0, score + 0.15) + + candidates.append((exercise_name, score)) + + # Sort by score descending + candidates.sort(key=lambda x: x[1], reverse=True) + + if not candidates or candidates[0][1] < self._threshold: + return None + + best_name, best_score = candidates[0] + + # Get alternatives (top 5 excluding best) + alternatives = [ + (name, self._exercises[name], score) + for name, score in candidates[1:6] + if score >= self._threshold + ] + + return MatchResult( + name=best_name, + category=self._exercises[best_name], + score=best_score, + alternatives=alternatives, + ) + + def resolve_or_raise( + self, query: str, equipment_hint: Optional[str] = None + ) -> MatchResult: + """Resolve exercise name or raise ValueError with suggestions. + + Args: + query: User-provided exercise name + equipment_hint: Optional equipment preference + + Returns: + MatchResult for the best match + + Raises: + ValueError: If no match found, includes suggestions + """ + result = self.resolve(query, equipment_hint) + if result is None: + suggestions = self.search(query, limit=3) + if suggestions: + suggestion_str = ", ".join(r.name for r in suggestions) + raise ValueError( + f"No match found for '{query}'. Did you mean: {suggestion_str}?" + ) + else: + raise ValueError( + f"No match found for '{query}'. " + f"Use search_exercises to find valid exercise names." 
+ ) + return result + + def search(self, query: str, limit: int = 10) -> List[MatchResult]: + """Search for exercises matching the query. + + Args: + query: Search term + limit: Maximum number of results + + Returns: + List of MatchResult sorted by score descending + """ + if not query or not query.strip(): + return [] + + query_normalized = self._normalize(query) + query_tokens = self._tokenize(query) + + # Score all candidates + candidates: List[Tuple[str, float]] = [] + for exercise_name in self._exercise_names: + score = self._score_candidate(query_tokens, exercise_name, query_normalized) + candidates.append((exercise_name, score)) + + # Sort by score and take top N + candidates.sort(key=lambda x: x[1], reverse=True) + top_candidates = candidates[:limit] + + return [ + MatchResult( + name=name, + category=self._exercises[name], + score=score, + ) + for name, score in top_candidates + if score > 0 + ] + + def get_category(self, exercise_name: str) -> Optional[str]: + """Get the category for a Garmin exercise name. + + Args: + exercise_name: Exercise name in any format + + Returns: + Category name or None if not found + """ + # Try exact match first + if exercise_name in self._exercises: + return self._exercises[exercise_name] + + # Try normalized match + normalized = self._normalize(exercise_name) + if normalized in self._normalized_index: + name = self._normalized_index[normalized] + return self._exercises[name] + + return None + + def list_categories(self) -> List[str]: + """List all unique exercise categories.""" + return sorted(set(self._exercises.values())) + + def list_by_category(self, category: str) -> List[str]: + """List all exercises in a category. 
+ + Args: + category: Category name (case-insensitive) + + Returns: + List of exercise names in the category + """ + category_upper = category.upper() + return sorted( + name for name, cat in self._exercises.items() if cat == category_upper + ) + + def list_by_equipment(self, equipment: str) -> List[str]: + """List all exercises for an equipment type. + + Args: + equipment: Equipment prefix (e.g., "DUMBBELL", "BARBELL") + + Returns: + List of exercise names using that equipment + """ + equipment_upper = equipment.upper() + return sorted( + name for name in self._exercise_names if name.startswith(equipment_upper) + ) + + +# ============================================================================= +# MODULE-LEVEL CONVENIENCE FUNCTIONS +# ============================================================================= + +_default_matcher: Optional[ExerciseMatcher] = None + + +def get_matcher() -> ExerciseMatcher: + """Get or create the default ExerciseMatcher instance.""" + global _default_matcher + if _default_matcher is None: + _default_matcher = ExerciseMatcher() + return _default_matcher + + +def resolve_exercise( + query: str, equipment_hint: Optional[str] = None +) -> Tuple[str, str]: + """Resolve a user-friendly exercise name to Garmin format. + + Convenience function that returns (exercise_name, category) tuple. + + Args: + query: User-provided exercise name + equipment_hint: Optional equipment preference + + Returns: + Tuple of (garmin_name, category) + + Raises: + ValueError: If no match found + """ + result = get_matcher().resolve_or_raise(query, equipment_hint) + return result.name, result.category + + +def search_exercises(query: str, limit: int = 10) -> List[MatchResult]: + """Search for exercises matching the query. + + Convenience function for exercise discovery. 
@dataclass
class EndCondition:
    """Termination rule for a single workout step.

    Attributes:
        condition_type: How the step ends (time, distance, lap button, etc.)
        value: Numeric payload for the rule — seconds for time, meters for
            distance, a count for iterations/reps; None for lap-button steps.
    """

    condition_type: EndConditionType = EndConditionType.LAP_BUTTON
    value: Optional[float] = None

    @classmethod
    def time(cls, seconds: float) -> "EndCondition":
        """Build a time-based end condition from seconds."""
        return cls(condition_type=EndConditionType.TIME, value=seconds)

    @classmethod
    def time_minutes(cls, minutes: float) -> "EndCondition":
        """Build a time-based end condition from minutes (stored as seconds)."""
        return cls.time(minutes * 60)

    @classmethod
    def distance(cls, meters: float) -> "EndCondition":
        """Build a distance-based end condition from meters."""
        return cls(condition_type=EndConditionType.DISTANCE, value=meters)

    @classmethod
    def distance_km(cls, kilometers: float) -> "EndCondition":
        """Build a distance-based end condition from kilometers."""
        return cls.distance(kilometers * 1000)

    @classmethod
    def distance_miles(cls, miles: float) -> "EndCondition":
        """Build a distance-based end condition from miles."""
        # One international mile is exactly 1609.344 meters.
        return cls.distance(miles * 1609.344)

    @classmethod
    def lap_button(cls) -> "EndCondition":
        """Build an end condition triggered by the device's lap button."""
        return cls(condition_type=EndConditionType.LAP_BUTTON)

    @classmethod
    def iterations(cls, count: int) -> "EndCondition":
        """Build an iterations end condition for repeat groups."""
        return cls(condition_type=EndConditionType.ITERATIONS, value=float(count))

    @classmethod
    def reps(cls, count: int) -> "EndCondition":
        """Build a reps end condition for strength exercises."""
        return cls(condition_type=EndConditionType.REPS, value=float(count))
+ + Args: + low_percent: Lower bound as percentage of max HR + high_percent: Upper bound as percentage of max HR + """ + return cls( + target_type=TargetType.HEART_RATE_ZONE, + value_low=low_percent, + value_high=high_percent, + ) + + @classmethod + def cadence_zone(cls, low_rpm: int, high_rpm: int) -> "Target": + """Create a cadence zone target. + + Args: + low_rpm: Lower bound RPM + high_rpm: Upper bound RPM + """ + return cls( + target_type=TargetType.CADENCE_ZONE, + value_low=float(low_rpm), + value_high=float(high_rpm), + ) + + @classmethod + def pace_zone( + cls, + low_pace_per_km: float, + high_pace_per_km: float, + ) -> "Target": + """Create a pace zone target. + + Args: + low_pace_per_km: Lower bound in seconds per kilometer + high_pace_per_km: Upper bound in seconds per kilometer + """ + return cls( + target_type=TargetType.PACE_ZONE, + value_low=low_pace_per_km, + value_high=high_pace_per_km, + ) + + +@dataclass +class WorkoutStep: + """A single executable step within a workout. + + Attributes: + step_type: Type of step (warmup, interval, recovery, etc.) 
+ end_condition: How the step ends + target: Target metric for the step + description: Optional description text + step_order: Order within parent (set automatically during serialization) + intensity: Intensity level for the step + exercise_name: Name of the exercise (e.g., "BARBELL_DEADLIFT") + exercise_category: Category of the exercise (e.g., "DEADLIFT", "CORE") + weight_value: Target weight value for strength exercises + weight_unit: Unit for weight (e.g., "pound", "kilogram") + """ + + step_type: StepType = StepType.OTHER + end_condition: EndCondition = field(default_factory=EndCondition.lap_button) + target: Target = field(default_factory=Target.no_target) + description: Optional[str] = None + step_order: Optional[int] = None + intensity: IntensityType = IntensityType.ACTIVE + exercise_name: Optional[str] = None + exercise_category: Optional[str] = None + weight_value: Optional[float] = None + weight_unit: Optional[str] = None + + def __post_init__(self) -> None: + """Set default intensity based on step type.""" + if self.intensity == IntensityType.ACTIVE: + intensity_map = { + StepType.WARMUP: IntensityType.WARMUP, + StepType.COOLDOWN: IntensityType.COOLDOWN, + StepType.RECOVERY: IntensityType.RECOVERY, + StepType.REST: IntensityType.REST, + StepType.INTERVAL: IntensityType.INTERVAL, + } + self.intensity = intensity_map.get(self.step_type, IntensityType.ACTIVE) + + +@dataclass +class RepeatGroup: + """A group of steps that repeat multiple times. 
+ + Attributes: + iterations: Number of times to repeat the steps + steps: List of steps within the repeat group + step_order: Order within parent workout (set during serialization) + """ + + iterations: int = 1 + steps: List[WorkoutStep] = field(default_factory=list) + step_order: Optional[int] = None + + def add_step(self, step: WorkoutStep) -> "RepeatGroup": + """Add a step to the repeat group.""" + self.steps.append(step) + return self + + +# Type alias for workout segment children +WorkoutStepOrRepeat = Union[WorkoutStep, RepeatGroup] + + +@dataclass +class WorkoutSegment: + """A segment of steps grouped by sport type. + + Used for multi-sport workouts where different segments have different sports. + + Attributes: + sport_type: Sport type for this segment + steps: List of steps and repeat groups in this segment + """ + + sport_type: SportType + steps: List[WorkoutStepOrRepeat] = field(default_factory=list) + + def add_step(self, step: WorkoutStepOrRepeat) -> "WorkoutSegment": + """Add a step or repeat group to the segment.""" + self.steps.append(step) + return self + + +@dataclass +class Workout: + """A complete workout definition. 
+ + Attributes: + name: Name of the workout + sport_type: Primary sport type + description: Optional description + steps: List of steps and repeat groups + workout_id: Garmin workout ID (populated after creation) + owner_id: Owner user ID + segments: Optional segments for multi-sport workouts + """ + + name: str + sport_type: SportType = SportType.CYCLING + description: Optional[str] = None + steps: List[WorkoutStepOrRepeat] = field(default_factory=list) + workout_id: Optional[int] = None + owner_id: Optional[int] = None + segments: Optional[List[WorkoutSegment]] = None + + def add_step(self, step: WorkoutStepOrRepeat) -> "Workout": + """Add a step or repeat group to the workout.""" + self.steps.append(step) + return self + + def __str__(self) -> str: + """Return human-readable workout summary.""" + step_count = len(self.steps) + total_steps = step_count + for step in self.steps: + if isinstance(step, RepeatGroup): + total_steps += len(step.steps) * step.iterations - 1 + + parts = [f"Workout: {self.name}"] + parts.append(f"Sport: {self.sport_type.key}") + if self.description: + parts.append(f"Description: {self.description}") + parts.append(f"Steps: {step_count} ({total_steps} total with repeats)") + if self.workout_id: + parts.append(f"ID: {self.workout_id}") + + return " | ".join(parts) + + def to_dict(self) -> Dict[str, Any]: + """Convert workout to dictionary representation. + + This is a basic dict conversion. For API format, use WorkoutSerializer. + """ + return { + "name": self.name, + "sport_type": self.sport_type.key, + "description": self.description, + "workout_id": self.workout_id, + "step_count": len(self.steps), + } diff --git a/src/garmy/workouts/serializer.py b/src/garmy/workouts/serializer.py new file mode 100644 index 0000000..1e59953 --- /dev/null +++ b/src/garmy/workouts/serializer.py @@ -0,0 +1,442 @@ +""" +Serializer for converting workouts to/from Garmin API JSON format. 
+ +This module handles the conversion between Python workout models and the +JSON format expected by the Garmin Connect workout API. +""" + +from typing import Any, Dict, List, Optional, Sequence, Union + +from .constants import ( + EndConditionType, + IntensityType, + SportType, + StepType, + TargetType, +) +from .models import ( + EndCondition, + RepeatGroup, + Target, + Workout, + WorkoutStep, + WorkoutStepOrRepeat, +) + + +class WorkoutSerializer: + """Converts Workout models to/from Garmin API JSON format.""" + + @classmethod + def to_api_format(cls, workout: Workout) -> Dict[str, Any]: + """Convert a Workout to Garmin API JSON format. + + Args: + workout: The Workout model to convert. + + Returns: + Dictionary in Garmin API format ready for POST/PUT. + """ + # Build workout steps with proper ordering + workout_steps = cls._serialize_steps(workout.steps) + + # Include both sportTypeId and sportTypeKey for Garmin to properly store the sport type + # Note: Garmin's IDs are inconsistent, but including the ID helps Garmin store it correctly + payload: Dict[str, Any] = { + "workoutName": workout.name, + "sportType": { + "sportTypeId": workout.sport_type.id, + "sportTypeKey": workout.sport_type.key, + }, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": { + "sportTypeId": workout.sport_type.id, + "sportTypeKey": workout.sport_type.key, + }, + "workoutSteps": workout_steps, + } + ], + } + + if workout.description: + payload["description"] = workout.description + + if workout.workout_id: + payload["workoutId"] = workout.workout_id + + if workout.owner_id: + payload["ownerId"] = workout.owner_id + + return payload + + @classmethod + def _serialize_steps( + cls, + steps: Sequence[Union[WorkoutStep, RepeatGroup]], + start_order: int = 1, + ) -> List[Dict[str, Any]]: + """Serialize a list of steps with proper ordering. + + Args: + steps: List of WorkoutStep and RepeatGroup objects. + start_order: Starting step order number. 
+ + Returns: + List of serialized step dictionaries. + """ + result: List[Dict[str, Any]] = [] + current_order = start_order + + for step in steps: + if isinstance(step, RepeatGroup): + serialized = cls._serialize_repeat_group(step, current_order) + result.append(serialized) + else: + serialized = cls._serialize_step(step, current_order) + result.append(serialized) + current_order += 1 + + return result + + @classmethod + def _serialize_step(cls, step: WorkoutStep, order: int) -> Dict[str, Any]: + """Serialize a single workout step. + + Args: + step: The WorkoutStep to serialize. + order: The step order number. + + Returns: + Serialized step dictionary. + """ + result: Dict[str, Any] = { + "type": "ExecutableStepDTO", + "stepOrder": order, + "stepType": { + "stepTypeId": step.step_type.type_id, + "stepTypeKey": step.step_type.value, + }, + "endCondition": { + "conditionTypeId": step.end_condition.condition_type.condition_type_id, + "conditionTypeKey": step.end_condition.condition_type.value, + }, + # Garmin expects endConditionValue at step level, not inside endCondition + "endConditionValue": step.end_condition.value, + "targetType": cls._serialize_target(step.target), + "intensityType": { + "intensityTypeId": step.intensity.intensity_type_id, + "intensityTypeKey": step.intensity.value, + }, + } + + if step.description: + result["description"] = step.description + + # Add exercise fields for strength training workouts + if step.exercise_category: + result["category"] = step.exercise_category + + if step.exercise_name: + result["exerciseName"] = step.exercise_name + + if step.weight_value is not None: + # Garmin stores weight in grams internally and uses kilogram as the standard unit + # We must convert to kilograms and send with kilogram unit + weight_unit = step.weight_unit or "pound" + weight_in_kg: float + if weight_unit == "kilogram": + weight_in_kg = step.weight_value + elif weight_unit == "pound": + # Convert pounds to kilograms + weight_in_kg = 
step.weight_value / 2.20462 + else: + # Assume kilograms + weight_in_kg = step.weight_value + + result["weightValue"] = round(weight_in_kg, 2) + # Always send as kilogram - Garmin will convert to user's display preference + result["weightUnit"] = { + "unitId": 8, + "unitKey": "kilogram", + "factor": 1000.0, + } + + return result + + @classmethod + def _serialize_end_condition(cls, end_condition: EndCondition) -> Dict[str, Any]: + """Serialize an end condition.""" + result: Dict[str, Any] = { + "conditionTypeId": end_condition.condition_type.condition_type_id, + "conditionTypeKey": end_condition.condition_type.value, + } + + if end_condition.value is not None: + # Garmin uses different value keys for different condition types + if end_condition.condition_type in ( + EndConditionType.ITERATIONS, + EndConditionType.REPS, + ): + result["conditionValue"] = int(end_condition.value) + else: + result["conditionValue"] = end_condition.value + + return result + + @classmethod + def _serialize_target(cls, target: Target) -> Dict[str, Any]: + """Serialize a target specification.""" + # Garmin API uses workoutTargetTypeId/Key, not targetTypeId/Key + result: Dict[str, Any] = { + "workoutTargetTypeId": target.target_type.target_type_id, + "workoutTargetTypeKey": target.target_type.value, + } + + if target.value_low is not None: + result["targetValueOne"] = target.value_low + + if target.value_high is not None: + result["targetValueTwo"] = target.value_high + + if target.zone_number is not None: + result["zoneNumber"] = target.zone_number + + return result + + @classmethod + def _serialize_repeat_group(cls, repeat: RepeatGroup, order: int) -> Dict[str, Any]: + """Serialize a repeat group. + + Args: + repeat: The RepeatGroup to serialize. + order: The step order number for the repeat group. + + Returns: + Serialized repeat group dictionary. 
+ """ + # Serialize child steps with their own ordering starting at 1 + child_steps = cls._serialize_steps(repeat.steps, start_order=1) + + # Garmin API uses numberOfIterations for repeat groups + return { + "type": "RepeatGroupDTO", + "stepOrder": order, + "stepType": { + "stepTypeId": StepType.REPEAT.type_id, + "stepTypeKey": StepType.REPEAT.value, + }, + "numberOfIterations": repeat.iterations, + "endCondition": { + "conditionTypeId": EndConditionType.ITERATIONS.condition_type_id, + "conditionTypeKey": EndConditionType.ITERATIONS.value, + }, + "endConditionValue": float(repeat.iterations), + "workoutSteps": child_steps, + } + + @classmethod + def from_api_format(cls, data: Dict[str, Any]) -> Workout: + """Parse Garmin API JSON into a Workout model. + + Args: + data: Dictionary from Garmin API response. + + Returns: + Parsed Workout model. + """ + # Parse sport type - prefer key over ID since Garmin's IDs are inconsistent + # (e.g., strength_training workouts return sportTypeId=5 but sportTypeKey=strength_training) + sport_type_data = data.get("sportType", {}) + sport_type_id = sport_type_data.get("sportTypeId") + sport_type_key = sport_type_data.get("sportTypeKey", "") + + # Prefer key-based lookup since Garmin's IDs don't match their keys + if sport_type_key: + sport_type = SportType.from_key(sport_type_key) + # Fall back to ID if key lookup fails + if sport_type == SportType.OTHER and sport_type_id is not None: + sport_type = SportType.from_id(sport_type_id) + elif sport_type_id is not None: + sport_type = SportType.from_id(sport_type_id) + else: + sport_type = SportType.OTHER + + # Parse steps from segments + steps: List[WorkoutStepOrRepeat] = [] + segments = data.get("workoutSegments", []) + if segments: + first_segment = segments[0] + workout_steps = first_segment.get("workoutSteps", []) + steps = cls._parse_steps(workout_steps) + + return Workout( + name=data.get("workoutName", "Untitled"), + sport_type=sport_type, + description=data.get("description"), + 
steps=steps, + workout_id=data.get("workoutId"), + owner_id=data.get("ownerId"), + ) + + @classmethod + def _parse_steps( + cls, steps_data: List[Dict[str, Any]] + ) -> List[WorkoutStepOrRepeat]: + """Parse a list of step dictionaries into models. + + Args: + steps_data: List of step dictionaries from API. + + Returns: + List of WorkoutStep and RepeatGroup models. + """ + result: List[WorkoutStepOrRepeat] = [] + + for step_data in steps_data: + step_type_str = step_data.get("type", "") + + if step_type_str == "RepeatGroupDTO": + result.append(cls._parse_repeat_group(step_data)) + else: + result.append(cls._parse_step(step_data)) + + return result + + @classmethod + def _parse_step(cls, data: Dict[str, Any]) -> WorkoutStep: + """Parse a single step dictionary into a WorkoutStep. + + Args: + data: Step dictionary from API. + + Returns: + Parsed WorkoutStep model. + """ + # Parse step type (handle None values) + step_type_data = data.get("stepType") or {} + step_type = StepType.from_type_id(step_type_data.get("stepTypeId", 7)) + + # Parse end condition (handle None values) + # Note: Garmin API returns endConditionValue at step level, not inside endCondition + end_condition_data = data.get("endCondition") or {} + end_condition_value = data.get("endConditionValue") # Value is at step level! 
+ end_condition = cls._parse_end_condition( + end_condition_data, end_condition_value + ) + + # Parse target (handle None values - Garmin sometimes returns targetType: null) + target = cls._parse_target(data.get("targetType") or {}) + + # Parse intensity (handle None values) + intensity_data = data.get("intensityType") or {} + intensity = IntensityType.from_intensity_type_id( + intensity_data.get("intensityTypeId", 1) + ) + + # Parse exercise info for strength workouts + exercise_name = data.get("exerciseName") + exercise_category = data.get("category") + + # Parse weight info + # Garmin stores weight in kilograms, convert to pounds for display + weight_value_raw = data.get("weightValue") + weight_value: Optional[float] = None + weight_unit: Optional[str] = None + + if weight_value_raw is not None and weight_value_raw > 0: + # Convert from kilograms to pounds + weight_value = round(weight_value_raw * 2.20462, 1) + weight_unit = "pound" + elif weight_value_raw is not None and weight_value_raw < 0: + # Clean up negative placeholder values from API + weight_value = None + weight_unit = None + + return WorkoutStep( + step_type=step_type, + end_condition=end_condition, + target=target, + description=data.get("description"), + step_order=data.get("stepOrder"), + intensity=intensity, + exercise_name=exercise_name, + exercise_category=exercise_category, + weight_value=weight_value, + weight_unit=weight_unit, + ) + + @classmethod + def _parse_end_condition( + cls, data: Dict[str, Any], step_level_value: Optional[float] = None + ) -> EndCondition: + """Parse an end condition dictionary. + + Args: + data: End condition dictionary from API. + step_level_value: Value from step level (endConditionValue), which Garmin + uses instead of putting it inside the endCondition object. 
+ """ + condition_type = EndConditionType.from_condition_type_id( + data.get("conditionTypeId", 1) + ) + # Prefer step-level value (endConditionValue) over nested value (conditionValue) + value = ( + step_level_value + if step_level_value is not None + else data.get("conditionValue") + ) + + return EndCondition(condition_type=condition_type, value=value) + + @classmethod + def _parse_target(cls, data: Dict[str, Any]) -> Target: + """Parse a target dictionary.""" + # Handle both field name formats (workoutTargetTypeId and targetTypeId) + target_type_id = data.get("workoutTargetTypeId") or data.get("targetTypeId", 1) + target_type = TargetType.from_target_type_id(target_type_id) + + # Handle both value field name formats + value_low = data.get("targetValueOne") or data.get("targetValueLow") + value_high = data.get("targetValueTwo") or data.get("targetValueHigh") + + return Target( + target_type=target_type, + value_low=value_low, + value_high=value_high, + zone_number=data.get("zoneNumber"), + ) + + @classmethod + def _parse_repeat_group(cls, data: Dict[str, Any]) -> RepeatGroup: + """Parse a repeat group dictionary. + + Args: + data: Repeat group dictionary from API. + + Returns: + Parsed RepeatGroup model. 
+ """ + # Try different iteration field names (numberOfIterations, endConditionValue, conditionValue) + iterations = data.get("numberOfIterations") + if iterations is None: + iterations = data.get("endConditionValue") + if iterations is None: + end_condition = data.get("endCondition") or {} + iterations = end_condition.get("conditionValue", 1) + iterations = int(iterations) + + # Parse child steps + child_steps_data = data.get("workoutSteps", []) + steps = [ + cls._parse_step(s) + for s in child_steps_data + if s.get("type") != "RepeatGroupDTO" + ] + + return RepeatGroup( + iterations=iterations, + steps=steps, + step_order=data.get("stepOrder"), + ) diff --git a/tests/test_mcp_workouts.py b/tests/test_mcp_workouts.py new file mode 100644 index 0000000..1e9b7c0 --- /dev/null +++ b/tests/test_mcp_workouts.py @@ -0,0 +1,428 @@ +"""Tests for MCP workout tools.""" + +import json +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from garmy.mcp.config import MCPConfig + + +class TestMCPWorkoutToolsConfig: + """Tests for MCP workout configuration.""" + + def test_config_enable_workouts_default_false(self, tmp_path: Path) -> None: + """Test that workouts are disabled by default.""" + db_file = tmp_path / "test.db" + db_file.touch() + config = MCPConfig(db_path=db_file) + assert config.enable_workouts is False + + def test_config_enable_workouts_true(self, tmp_path: Path) -> None: + """Test that workouts can be enabled.""" + db_file = tmp_path / "test.db" + db_file.touch() + config = MCPConfig(db_path=db_file, enable_workouts=True) + assert config.enable_workouts is True + + def test_config_enable_workouts_with_profile_path(self, tmp_path: Path) -> None: + """Test workouts config with profile path.""" + db_file = tmp_path / "test.db" + db_file.touch() + config = MCPConfig( + db_path=db_file, + enable_workouts=True, + profile_path=tmp_path, + ) + assert config.enable_workouts is True + assert config.profile_path == tmp_path + + +class 
TestAddStepsFromJson: + """Tests for _add_steps_from_json helper function.""" + + def test_add_warmup_step(self) -> None: + """Test adding a warmup step from JSON.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + + # Import the helper function by creating a mock server context + steps = [{"type": "warmup", "minutes": 10}] + + # Add steps manually to test the logic + for step in steps: + step_type = step.get("type", "interval").lower() + minutes = step.get("minutes") + + if step_type == "warmup": + builder.warmup(minutes=minutes) + + workout = builder.build() + assert len(workout.steps) == 1 + assert workout.steps[0].step_type.value == "warmup" + + def test_add_interval_with_target(self) -> None: + """Test adding an interval step with power target.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + + builder.interval(minutes=5, target_power=(90, 95)) + + workout = builder.build() + assert len(workout.steps) == 1 + assert workout.steps[0].step_type.value == "interval" + assert workout.steps[0].target.value_low == 90 + assert workout.steps[0].target.value_high == 95 + + def test_add_repeat_group(self) -> None: + """Test adding a repeat group from JSON structure.""" + from garmy.workouts import SportType, WorkoutBuilder + from garmy.workouts.models import RepeatGroup + + builder = WorkoutBuilder("Test", SportType.CYCLING) + + # Add repeat group with nested steps + repeat_builder = builder.repeat(3) + repeat_builder.interval(minutes=5, target_power=(90, 95)) + repeat_builder.recovery(minutes=2) + repeat_builder.end_repeat() + + workout = builder.build() + assert len(workout.steps) == 1 + assert isinstance(workout.steps[0], RepeatGroup) + assert workout.steps[0].iterations == 3 + assert len(workout.steps[0].steps) == 2 + + def test_add_cooldown_step(self) -> None: + """Test adding a cooldown step.""" + from garmy.workouts import SportType, 
WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.cooldown(minutes=10) + + workout = builder.build() + assert len(workout.steps) == 1 + assert workout.steps[0].step_type.value == "cooldown" + + def test_add_recovery_step(self) -> None: + """Test adding a recovery step.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.recovery(minutes=3, target_hr=(60, 70)) + + workout = builder.build() + assert len(workout.steps) == 1 + assert workout.steps[0].step_type.value == "recovery" + + def test_add_rest_step(self) -> None: + """Test adding a rest step.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.rest(minutes=2) + + workout = builder.build() + assert len(workout.steps) == 1 + assert workout.steps[0].step_type.value == "rest" + + +class TestWorkoutToolsIntegration: + """Integration tests for workout MCP tools behavior.""" + + def test_list_workouts_returns_expected_structure(self) -> None: + """Test that list_workouts returns expected structure.""" + from garmy.workouts import SportType, Workout + + # Create mock workout objects + mock_workouts = [ + Workout( + name="Test Workout 1", + sport_type=SportType.CYCLING, + workout_id=123, + steps=[], + ), + Workout( + name="Test Workout 2", + sport_type=SportType.RUNNING, + workout_id=456, + steps=[], + ), + ] + + # Format as the MCP tool would + result = { + "success": True, + "count": len(mock_workouts), + "workouts": [ + { + "workout_id": w.workout_id, + "name": w.name, + "sport_type": w.sport_type.key, + "description": w.description, + "step_count": len(w.steps), + } + for w in mock_workouts + ], + } + + assert result["success"] is True + assert result["count"] == 2 + assert len(result["workouts"]) == 2 + assert result["workouts"][0]["name"] == "Test Workout 1" + assert result["workouts"][1]["sport_type"] == "running" + + def 
test_get_workout_formats_steps_correctly(self) -> None: + """Test that get_workout formats steps correctly.""" + from garmy.workouts import ( + EndCondition, + SportType, + StepType, + Target, + Workout, + WorkoutStep, + ) + from garmy.workouts.models import RepeatGroup + + # Create a workout with various step types + workout = Workout( + name="Test", + sport_type=SportType.CYCLING, + workout_id=123, + steps=[ + WorkoutStep( + step_type=StepType.WARMUP, + end_condition=EndCondition.time_minutes(10), + target=Target.no_target(), + ), + RepeatGroup( + iterations=3, + steps=[ + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + target=Target.power_zone(90, 95), + ), + ], + ), + ], + ) + + # Format steps as the MCP tool would + steps_info = [] + for i, step in enumerate(workout.steps): + if isinstance(step, RepeatGroup): + steps_info.append( + { + "index": i + 1, + "type": "repeat", + "iterations": step.iterations, + "steps": [ + { + "type": s.step_type.value, + "duration_seconds": s.end_condition.value, + "target_type": s.target.target_type.value, + } + for s in step.steps + ], + } + ) + else: + steps_info.append( + { + "index": i + 1, + "type": step.step_type.value, + "duration_seconds": step.end_condition.value, + } + ) + + assert len(steps_info) == 2 + assert steps_info[0]["type"] == "warmup" + assert steps_info[1]["type"] == "repeat" + assert steps_info[1]["iterations"] == 3 + + def test_sport_type_validation(self) -> None: + """Test sport type validation for create_workout.""" + from garmy.workouts import SportType + + # Valid sport types (using actual API keys) + valid_types = ["cycling", "running", "swimming", "strength_training"] + for st in valid_types: + sport = SportType.from_key(st) + assert sport.key == st + + # Unknown sport type returns OTHER (with key "other") + sport = SportType.from_key("invalid_sport") + assert sport == SportType.OTHER + assert sport.key == "other" + + def test_date_format_validation(self) -> 
None: + """Test date format validation for schedule_workout.""" + import re + + valid_dates = ["2024-01-15", "2025-12-31", "2023-06-01"] + invalid_dates = ["01-15-2024", "2024/01/15", "2024-1-15", "not-a-date"] + + pattern = r"^\d{4}-\d{2}-\d{2}$" + + for d in valid_dates: + assert re.match(pattern, d) is not None + + for d in invalid_dates: + assert re.match(pattern, d) is None + + def test_workout_id_validation(self) -> None: + """Test workout ID validation.""" + # Valid IDs + assert 1 >= 1 # Minimum valid ID + assert 123456 >= 1 + + # Invalid IDs + assert not (0 >= 1) + assert not (-1 >= 1) + + +class TestCreateWorkoutJsonParsing: + """Tests for JSON parsing in create_workout.""" + + def test_parse_simple_steps_json(self) -> None: + """Test parsing simple steps JSON.""" + steps_json = json.dumps( + [ + {"type": "warmup", "minutes": 10}, + {"type": "interval", "minutes": 5, "target_power": [90, 95]}, + {"type": "cooldown", "minutes": 10}, + ] + ) + + steps = json.loads(steps_json) + assert len(steps) == 3 + assert steps[0]["type"] == "warmup" + assert steps[1]["target_power"] == [90, 95] + + def test_parse_repeat_steps_json(self) -> None: + """Test parsing repeat group in steps JSON.""" + steps_json = json.dumps( + [ + {"type": "warmup", "minutes": 10}, + { + "type": "repeat", + "iterations": 3, + "steps": [ + {"type": "interval", "minutes": 5}, + {"type": "recovery", "minutes": 2}, + ], + }, + {"type": "cooldown", "minutes": 10}, + ] + ) + + steps = json.loads(steps_json) + assert len(steps) == 3 + assert steps[1]["type"] == "repeat" + assert steps[1]["iterations"] == 3 + assert len(steps[1]["steps"]) == 2 + + def test_parse_steps_with_all_targets(self) -> None: + """Test parsing steps with various target types.""" + steps_json = json.dumps( + [ + {"type": "warmup", "minutes": 10, "target_power": [50, 60]}, + {"type": "interval", "minutes": 5, "target_hr": [150, 165]}, + {"type": "interval", "minutes": 5, "target_cadence": [90, 100]}, + ] + ) + + steps = 
json.loads(steps_json) + assert steps[0]["target_power"] == [50, 60] + assert steps[1]["target_hr"] == [150, 165] + assert steps[2]["target_cadence"] == [90, 100] + + def test_invalid_json_raises_error(self) -> None: + """Test that invalid JSON raises an error.""" + invalid_json = "not valid json" + + with pytest.raises(json.JSONDecodeError): + json.loads(invalid_json) + + def test_convert_list_to_tuple(self) -> None: + """Test converting target lists to tuples.""" + target_power = [90, 95] + if isinstance(target_power, list): + target_power = tuple(target_power) + + assert isinstance(target_power, tuple) + assert target_power == (90, 95) + + +class TestWorkoutToolsEdgeCases: + """Edge case tests for workout tools.""" + + def test_empty_workout_name(self) -> None: + """Test handling empty workout name.""" + from garmy.workouts import SportType, WorkoutBuilder + + # Empty name should still work (API may reject it) + builder = WorkoutBuilder("", SportType.CYCLING) + workout = builder.build() + assert workout.name == "" + + def test_workout_with_no_steps(self) -> None: + """Test creating workout with no steps.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Empty Workout", SportType.CYCLING) + workout = builder.build() + assert len(workout.steps) == 0 + + def test_very_long_workout_description(self) -> None: + """Test workout with very long description.""" + from garmy.workouts import SportType, WorkoutBuilder + + long_desc = "A" * 1000 + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.with_description(long_desc) + workout = builder.build() + assert len(workout.description) == 1000 + + def test_step_with_seconds_instead_of_minutes(self) -> None: + """Test creating step with seconds duration.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.interval(seconds=30) + workout = builder.build() + assert workout.steps[0].end_condition.value == 30 + 
+ def test_step_with_distance(self) -> None: + """Test creating step with distance duration.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.RUNNING) + builder.interval(distance_km=1.0) + workout = builder.build() + # Distance is stored in meters + assert workout.steps[0].end_condition.value == 1000 + + def test_lap_button_end_condition(self) -> None: + """Test creating step with lap button end condition.""" + from garmy.workouts import SportType, WorkoutBuilder + + builder = WorkoutBuilder("Test", SportType.CYCLING) + builder.interval(lap_button=True) + workout = builder.build() + assert workout.steps[0].end_condition.condition_type.value == "lap.button" + + def test_schedule_workout_date_edge_cases(self) -> None: + """Test schedule workout with various date formats.""" + import re + + pattern = r"^\d{4}-\d{2}-\d{2}$" + + # Edge case dates + assert re.match(pattern, "2024-02-29") is not None # Leap year + assert re.match(pattern, "2024-12-31") is not None # Year end + assert re.match(pattern, "2024-01-01") is not None # Year start diff --git a/tests/test_workouts_builder.py b/tests/test_workouts_builder.py new file mode 100644 index 0000000..6a842ce --- /dev/null +++ b/tests/test_workouts_builder.py @@ -0,0 +1,337 @@ +"""Tests for garmy.workouts.builder module.""" + +import pytest + +from garmy.workouts.builder import RepeatBuilder, WorkoutBuilder +from garmy.workouts.constants import ( + EndConditionType, + SportType, + StepType, + TargetType, +) +from garmy.workouts.models import RepeatGroup, Workout, WorkoutStep + + +class TestWorkoutBuilder: + """Test cases for WorkoutBuilder class.""" + + def test_builder_initialization(self): + """Test WorkoutBuilder initialization.""" + builder = WorkoutBuilder("Test Workout", SportType.CYCLING) + + assert builder._name == "Test Workout" + assert builder._sport_type == SportType.CYCLING + assert builder._description is None + assert builder._steps == [] + + def 
test_builder_default_sport_type(self): + """Test WorkoutBuilder defaults to cycling.""" + builder = WorkoutBuilder("Test") + assert builder._sport_type == SportType.CYCLING + + def test_with_description(self): + """Test with_description method.""" + builder = WorkoutBuilder("Test") + result = builder.with_description("A great workout") + + assert result is builder # Returns self for chaining + assert builder._description == "A great workout" + + def test_warmup_with_time(self): + """Test warmup method with time.""" + builder = WorkoutBuilder("Test") + result = builder.warmup(minutes=10) + + assert result is builder + assert len(builder._steps) == 1 + + step = builder._steps[0] + assert step.step_type == StepType.WARMUP + assert step.end_condition.condition_type == EndConditionType.TIME + assert step.end_condition.value == 600 # 10 minutes + + def test_warmup_with_target_power(self): + """Test warmup with power target.""" + builder = WorkoutBuilder("Test") + builder.warmup(minutes=10, target_power=(50, 60)) + + step = builder._steps[0] + assert step.target.target_type == TargetType.POWER_ZONE + assert step.target.value_low == 50 + assert step.target.value_high == 60 + + def test_interval_with_target_hr(self): + """Test interval with heart rate target.""" + builder = WorkoutBuilder("Test") + builder.interval(minutes=5, target_hr=(75, 85)) + + step = builder._steps[0] + assert step.step_type == StepType.INTERVAL + assert step.target.target_type == TargetType.HEART_RATE_ZONE + assert step.target.value_low == 75 + assert step.target.value_high == 85 + + def test_interval_with_cadence(self): + """Test interval with cadence target.""" + builder = WorkoutBuilder("Test") + builder.interval(minutes=5, target_cadence=(85, 95)) + + step = builder._steps[0] + assert step.target.target_type == TargetType.CADENCE_ZONE + assert step.target.value_low == 85 + assert step.target.value_high == 95 + + def test_recovery(self): + """Test recovery method.""" + builder = 
WorkoutBuilder("Test") + builder.recovery(minutes=2) + + step = builder._steps[0] + assert step.step_type == StepType.RECOVERY + assert step.end_condition.value == 120 + + def test_rest(self): + """Test rest method.""" + builder = WorkoutBuilder("Test") + builder.rest(seconds=30) + + step = builder._steps[0] + assert step.step_type == StepType.REST + assert step.end_condition.value == 30 + + def test_cooldown(self): + """Test cooldown method.""" + builder = WorkoutBuilder("Test") + builder.cooldown(minutes=10) + + step = builder._steps[0] + assert step.step_type == StepType.COOLDOWN + + def test_lap_button_end_condition(self): + """Test step with lap button end condition.""" + builder = WorkoutBuilder("Test") + builder.warmup(lap_button=True) + + step = builder._steps[0] + assert step.end_condition.condition_type == EndConditionType.LAP_BUTTON + + def test_distance_km(self): + """Test step with distance in km.""" + builder = WorkoutBuilder("Test") + builder.interval(distance_km=5) + + step = builder._steps[0] + assert step.end_condition.condition_type == EndConditionType.DISTANCE + assert step.end_condition.value == 5000 + + def test_distance_miles(self): + """Test step with distance in miles.""" + builder = WorkoutBuilder("Test") + builder.interval(distance_miles=1) + + step = builder._steps[0] + assert step.end_condition.condition_type == EndConditionType.DISTANCE + assert step.end_condition.value == pytest.approx(1609.344, rel=0.01) + + def test_step_description(self): + """Test step with description.""" + builder = WorkoutBuilder("Test") + builder.interval(minutes=5, description="Push hard!") + + step = builder._steps[0] + assert step.description == "Push hard!" 
+ + def test_generic_step_method(self): + """Test generic step method.""" + builder = WorkoutBuilder("Test") + builder.step(StepType.INTERVAL, minutes=5, target_power=(90, 95)) + + step = builder._steps[0] + assert step.step_type == StepType.INTERVAL + assert step.target.value_low == 90 + + def test_add_step(self): + """Test add_step method with pre-built step.""" + builder = WorkoutBuilder("Test") + step = WorkoutStep(step_type=StepType.WARMUP) + + result = builder.add_step(step) + + assert result is builder + assert builder._steps[0] is step + + def test_build(self): + """Test build method creates Workout object.""" + workout = ( + WorkoutBuilder("Test Workout", SportType.RUNNING) + .with_description("My description") + .warmup(minutes=10) + .interval(minutes=20) + .cooldown(minutes=10) + .build() + ) + + assert isinstance(workout, Workout) + assert workout.name == "Test Workout" + assert workout.sport_type == SportType.RUNNING + assert workout.description == "My description" + assert len(workout.steps) == 3 + + def test_full_workout_chain(self): + """Test full workout building with chained methods.""" + workout = ( + WorkoutBuilder("Sweet Spot 2x20", SportType.CYCLING) + .with_description("Sweet spot training") + .warmup(minutes=15, target_power=(50, 65)) + .interval(minutes=20, target_power=(88, 93)) + .recovery(minutes=5, target_power=(40, 50)) + .interval(minutes=20, target_power=(88, 93)) + .cooldown(minutes=10, target_power=(40, 55)) + .build() + ) + + assert workout.name == "Sweet Spot 2x20" + assert len(workout.steps) == 5 + assert workout.steps[0].step_type == StepType.WARMUP + assert workout.steps[1].step_type == StepType.INTERVAL + assert workout.steps[4].step_type == StepType.COOLDOWN + + +class TestRepeatBuilder: + """Test cases for RepeatBuilder class.""" + + def test_repeat_builder_initialization(self): + """Test RepeatBuilder initialization.""" + parent = WorkoutBuilder("Test") + repeat_builder = RepeatBuilder(parent, 3) + + assert 
repeat_builder._parent is parent + assert repeat_builder._iterations == 3 + assert repeat_builder._steps == [] + + def test_repeat_interval(self): + """Test adding interval to repeat group.""" + parent = WorkoutBuilder("Test") + repeat_builder = parent.repeat(3) + + result = repeat_builder.interval(minutes=5) + + assert result is repeat_builder # Returns self for chaining + assert len(repeat_builder._steps) == 1 + assert repeat_builder._steps[0].step_type == StepType.INTERVAL + + def test_repeat_recovery(self): + """Test adding recovery to repeat group.""" + parent = WorkoutBuilder("Test") + repeat_builder = parent.repeat(3) + + repeat_builder.recovery(minutes=2) + + assert len(repeat_builder._steps) == 1 + assert repeat_builder._steps[0].step_type == StepType.RECOVERY + + def test_repeat_rest(self): + """Test adding rest to repeat group.""" + parent = WorkoutBuilder("Test") + repeat_builder = parent.repeat(3) + + repeat_builder.rest(seconds=30) + + assert len(repeat_builder._steps) == 1 + assert repeat_builder._steps[0].step_type == StepType.REST + + def test_repeat_generic_step(self): + """Test adding generic step to repeat group.""" + parent = WorkoutBuilder("Test") + repeat_builder = parent.repeat(3) + + repeat_builder.step(StepType.INTERVAL, minutes=5) + + assert repeat_builder._steps[0].step_type == StepType.INTERVAL + + def test_end_repeat_returns_parent(self): + """Test end_repeat returns parent builder.""" + parent = WorkoutBuilder("Test") + repeat_builder = parent.repeat(3) + repeat_builder.interval(minutes=5) + + result = repeat_builder.end_repeat() + + assert result is parent + + def test_end_repeat_adds_repeat_group(self): + """Test end_repeat adds RepeatGroup to parent.""" + parent = WorkoutBuilder("Test") + parent.repeat(3).interval(minutes=5).recovery(minutes=2).end_repeat() + + assert len(parent._steps) == 1 + assert isinstance(parent._steps[0], RepeatGroup) + assert parent._steps[0].iterations == 3 + assert len(parent._steps[0].steps) == 2 + + def 
test_repeat_with_targets(self): + """Test repeat steps with targets.""" + parent = WorkoutBuilder("Test") + parent.repeat(3).interval(minutes=5, target_power=(90, 95)).recovery( + minutes=2, target_hr=(50, 60) + ).end_repeat() + + repeat_group = parent._steps[0] + assert repeat_group.steps[0].target.target_type == TargetType.POWER_ZONE + assert repeat_group.steps[1].target.target_type == TargetType.HEART_RATE_ZONE + + def test_full_workout_with_repeat(self): + """Test building full workout with repeat group.""" + workout = ( + WorkoutBuilder("VO2max Intervals", SportType.CYCLING) + .warmup(minutes=15) + .repeat(5) + .interval(minutes=3, target_power=(105, 120)) + .recovery(minutes=3, target_power=(40, 50)) + .end_repeat() + .cooldown(minutes=10) + .build() + ) + + assert len(workout.steps) == 3 # warmup, repeat, cooldown + assert isinstance(workout.steps[1], RepeatGroup) + assert workout.steps[1].iterations == 5 + assert len(workout.steps[1].steps) == 2 + + +class TestWorkoutBuilderIntegration: + """Integration tests for workout building.""" + + def test_complex_workout(self): + """Test building a complex workout with multiple elements.""" + workout = ( + WorkoutBuilder("Race Prep", SportType.RUNNING) + .with_description("Pre-race sharpening workout") + .warmup(minutes=15, target_hr=(60, 70)) + .repeat(3) + .interval(distance_km=1, target_hr=(85, 90), description="Fast km") + .recovery(minutes=2, target_hr=(60, 65)) + .end_repeat() + .rest(minutes=5) + .repeat(6) + .interval(seconds=30, target_hr=(90, 95), description="Strides") + .recovery(seconds=90) + .end_repeat() + .cooldown(minutes=10, target_hr=(55, 65)) + .build() + ) + + assert workout.name == "Race Prep" + assert workout.sport_type == SportType.RUNNING + assert len(workout.steps) == 5 # warmup, repeat, rest, repeat, cooldown + + # First repeat group + first_repeat = workout.steps[1] + assert isinstance(first_repeat, RepeatGroup) + assert first_repeat.iterations == 3 + assert 
first_repeat.steps[0].description == "Fast km" + + # Second repeat group + second_repeat = workout.steps[3] + assert isinstance(second_repeat, RepeatGroup) + assert second_repeat.iterations == 6 diff --git a/tests/test_workouts_client.py b/tests/test_workouts_client.py new file mode 100644 index 0000000..b201303 --- /dev/null +++ b/tests/test_workouts_client.py @@ -0,0 +1,401 @@ +"""Tests for garmy.workouts.client module.""" + +from unittest.mock import MagicMock, Mock, patch + +import pytest + +from garmy.workouts.client import WorkoutClient +from garmy.workouts.constants import SportType, StepType +from garmy.workouts.models import ( + EndCondition, + RepeatGroup, + Workout, + WorkoutStep, +) + + +class TestWorkoutClient: + """Test cases for WorkoutClient class.""" + + @pytest.fixture + def mock_api_client(self): + """Create a mock API client.""" + client = MagicMock() + client.connectapi = MagicMock() + client.request = MagicMock() + return client + + @pytest.fixture + def workout_client(self, mock_api_client): + """Create a WorkoutClient with mocked API client.""" + return WorkoutClient(mock_api_client) + + def test_workout_client_initialization(self, mock_api_client): + """Test WorkoutClient initialization.""" + client = WorkoutClient(mock_api_client) + assert client.api_client is mock_api_client + + def test_workout_headers(self, workout_client): + """Test workout headers are correctly defined.""" + assert "Referer" in workout_client.WORKOUT_HEADERS + assert "nk" in workout_client.WORKOUT_HEADERS + assert "workouts" in workout_client.WORKOUT_HEADERS["Referer"] + + def test_list_workouts_empty(self, workout_client, mock_api_client): + """Test list_workouts returns empty list when no workouts.""" + mock_api_client.connectapi.return_value = [] + + result = workout_client.list_workouts() + + assert result == [] + mock_api_client.connectapi.assert_called_once() + + def test_list_workouts_with_results(self, workout_client, mock_api_client): + """Test list_workouts 
returns parsed workouts.""" + mock_api_client.connectapi.return_value = [ + { + "workoutId": 1, + "workoutName": "Workout 1", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + }, + { + "workoutId": 2, + "workoutName": "Workout 2", + "sportType": {"sportTypeId": 1, "sportTypeKey": "running"}, + "workoutSegments": [], + }, + ] + + result = workout_client.list_workouts() + + assert len(result) == 2 + assert result[0].name == "Workout 1" + assert result[0].workout_id == 1 + assert result[1].name == "Workout 2" + assert result[1].sport_type == SportType.RUNNING + + def test_list_workouts_parameters(self, workout_client, mock_api_client): + """Test list_workouts passes correct parameters.""" + mock_api_client.connectapi.return_value = [] + + workout_client.list_workouts( + limit=50, + start=10, + my_workouts_only=False, + order_by="UPDATE_DATE", + order_seq="DESC", + ) + + call_args = mock_api_client.connectapi.call_args + endpoint = call_args[0][0] + + assert "limit=50" in endpoint + assert "start=10" in endpoint + assert "myWorkoutsOnly=false" in endpoint + assert "orderBy=UPDATE_DATE" in endpoint + assert "orderSeq=DESC" in endpoint + + def test_list_workouts_none_response(self, workout_client, mock_api_client): + """Test list_workouts handles None response.""" + mock_api_client.connectapi.return_value = None + + result = workout_client.list_workouts() + + assert result == [] + + def test_get_workout(self, workout_client, mock_api_client): + """Test get_workout returns parsed workout.""" + mock_api_client.connectapi.return_value = { + "workoutId": 12345, + "workoutName": "Test Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "description": "Test description", + "workoutSegments": [], + } + + result = workout_client.get_workout(12345) + + assert result is not None + assert result.workout_id == 12345 + assert result.name == "Test Workout" + assert result.description == "Test description" + 
mock_api_client.connectapi.assert_called_once() + assert ( + "/workout-service/workout/12345" + in mock_api_client.connectapi.call_args[0][0] + ) + + def test_get_workout_not_found(self, workout_client, mock_api_client): + """Test get_workout returns None when not found.""" + mock_api_client.connectapi.return_value = None + + result = workout_client.get_workout(99999) + + assert result is None + + def test_create_workout(self, workout_client, mock_api_client): + """Test create_workout creates and returns workout with ID.""" + workout = Workout(name="New Workout", sport_type=SportType.CYCLING) + workout.add_step(WorkoutStep(step_type=StepType.WARMUP)) + + mock_api_client.connectapi.return_value = { + "workoutId": 12345, + "workoutName": "New Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2}, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepType": {"stepTypeId": 1}, + "endCondition": {"conditionTypeId": 1}, + "targetType": {"targetTypeId": 1}, + "intensityType": {"intensityTypeId": 3}, + } + ], + } + ], + } + + result = workout_client.create_workout(workout) + + assert result.workout_id == 12345 + call_args = mock_api_client.connectapi.call_args + assert call_args[1]["method"] == "POST" + assert "json" in call_args[1] + + def test_create_workout_raw(self, workout_client, mock_api_client): + """Test create_workout_raw with raw API format.""" + raw_data = { + "workoutName": "Raw Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + } + + mock_api_client.connectapi.return_value = { + "workoutId": 12345, + **raw_data, + } + + result = workout_client.create_workout_raw(raw_data) + + assert result["workoutId"] == 12345 + assert result["workoutName"] == "Raw Workout" + + def test_update_workout(self, workout_client, mock_api_client): + """Test update_workout updates existing workout.""" + workout = Workout( + 
name="Updated Workout", sport_type=SportType.CYCLING, workout_id=12345 + ) + + mock_api_client.connectapi.return_value = { + "workoutId": 12345, + "workoutName": "Updated Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + } + + result = workout_client.update_workout(workout) + + assert result.name == "Updated Workout" + call_args = mock_api_client.connectapi.call_args + assert "/workout-service/workout/12345" in call_args[0][0] + assert call_args[1]["method"] == "PUT" + + def test_update_workout_without_id(self, workout_client): + """Test update_workout raises error without workout_id.""" + workout = Workout(name="No ID Workout") + + with pytest.raises(ValueError, match="workout_id must be set"): + workout_client.update_workout(workout) + + def test_delete_workout(self, workout_client, mock_api_client): + """Test delete_workout deletes workout.""" + mock_api_client.connectapi.return_value = None + + result = workout_client.delete_workout(12345) + + assert result is True + call_args = mock_api_client.connectapi.call_args + assert "/workout-service/workout/12345" in call_args[0][0] + assert call_args[1]["method"] == "DELETE" + + def test_schedule_workout(self, workout_client, mock_api_client): + """Test schedule_workout schedules workout for date.""" + mock_api_client.connectapi.return_value = None + + result = workout_client.schedule_workout(12345, "2024-01-15") + + assert result is True + call_args = mock_api_client.connectapi.call_args + assert "/workout-service/schedule/12345" in call_args[0][0] + assert call_args[1]["method"] == "POST" + assert call_args[1]["json"]["date"] == "2024-01-15" + + def test_unschedule_workout(self, workout_client, mock_api_client): + """Test unschedule_workout removes scheduled workout.""" + mock_api_client.connectapi.return_value = None + + result = workout_client.unschedule_workout(12345, "2024-01-15") + + assert result is True + call_args = mock_api_client.connectapi.call_args + 
assert "/workout-service/schedule/12345" in call_args[0][0] + assert call_args[1]["method"] == "DELETE" + + def test_download_fit(self, workout_client, mock_api_client): + """Test download_fit returns FIT file bytes.""" + mock_response = Mock() + mock_response.content = b"FIT file content" + mock_api_client.request.return_value = mock_response + + result = workout_client.download_fit(12345) + + assert result == b"FIT file content" + call_args = mock_api_client.request.call_args + assert call_args[0][0] == "GET" + assert "/workout-service/workout/FIT/12345" in call_args[0][2] + + def test_duplicate_workout(self, workout_client, mock_api_client): + """Test duplicate_workout creates copy of workout.""" + # Mock get_workout response + mock_api_client.connectapi.side_effect = [ + # First call: get_workout + { + "workoutId": 12345, + "workoutName": "Original Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "description": "Original description", + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2}, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepType": {"stepTypeId": 1}, + "endCondition": {"conditionTypeId": 1}, + "targetType": {"targetTypeId": 1}, + "intensityType": {"intensityTypeId": 3}, + } + ], + } + ], + }, + # Second call: create_workout + { + "workoutId": 67890, + "workoutName": "Original Workout (Copy)", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + }, + ] + + result = workout_client.duplicate_workout(12345) + + assert result.workout_id == 67890 + assert "(Copy)" in result.name + + def test_duplicate_workout_with_new_name(self, workout_client, mock_api_client): + """Test duplicate_workout with custom name.""" + mock_api_client.connectapi.side_effect = [ + { + "workoutId": 12345, + "workoutName": "Original", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + }, + { + "workoutId": 67890, + "workoutName": "Custom Name", 
+ "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + }, + ] + + result = workout_client.duplicate_workout(12345, new_name="Custom Name") + + # Verify the create call used the custom name + create_call = mock_api_client.connectapi.call_args_list[1] + assert create_call[1]["json"]["workoutName"] == "Custom Name" + + def test_duplicate_workout_not_found(self, workout_client, mock_api_client): + """Test duplicate_workout raises error when workout not found.""" + mock_api_client.connectapi.return_value = None + + with pytest.raises(ValueError, match="not found"): + workout_client.duplicate_workout(99999) + + +class TestWorkoutClientIntegration: + """Integration tests for WorkoutClient with real serialization.""" + + @pytest.fixture + def mock_api_client(self): + """Create a mock API client.""" + return MagicMock() + + @pytest.fixture + def workout_client(self, mock_api_client): + """Create a WorkoutClient with mocked API client.""" + return WorkoutClient(mock_api_client) + + def test_create_complex_workout(self, workout_client, mock_api_client): + """Test creating a complex workout with repeats.""" + workout = Workout( + name="Complex Workout", + sport_type=SportType.CYCLING, + description="Test", + ) + workout.add_step( + WorkoutStep( + step_type=StepType.WARMUP, + end_condition=EndCondition.time_minutes(10), + ) + ) + + repeat = RepeatGroup(iterations=3) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + ) + ) + repeat.add_step( + WorkoutStep( + step_type=StepType.RECOVERY, + end_condition=EndCondition.time_minutes(2), + ) + ) + workout.add_step(repeat) + + workout.add_step( + WorkoutStep( + step_type=StepType.COOLDOWN, + end_condition=EndCondition.time_minutes(10), + ) + ) + + mock_api_client.connectapi.return_value = { + "workoutId": 12345, + "workoutName": "Complex Workout", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [], + } + + 
result = workout_client.create_workout(workout) + + # Verify the payload structure + call_args = mock_api_client.connectapi.call_args + payload = call_args[1]["json"] + + assert payload["workoutName"] == "Complex Workout" + assert len(payload["workoutSegments"][0]["workoutSteps"]) == 3 + + # Verify repeat group structure + repeat_step = payload["workoutSegments"][0]["workoutSteps"][1] + assert repeat_step["type"] == "RepeatGroupDTO" + assert len(repeat_step["workoutSteps"]) == 2 diff --git a/tests/test_workouts_constants.py b/tests/test_workouts_constants.py new file mode 100644 index 0000000..c7899ed --- /dev/null +++ b/tests/test_workouts_constants.py @@ -0,0 +1,166 @@ +"""Tests for garmy.workouts.constants module.""" + +import pytest + +from garmy.workouts.constants import ( + EndConditionType, + IntensityType, + SportType, + StepType, + TargetType, +) + + +class TestSportType: + """Test cases for SportType enum.""" + + def test_sport_type_values(self): + """Test SportType has correct id and key values.""" + assert SportType.RUNNING.id == 1 + assert SportType.RUNNING.key == "running" + + assert SportType.CYCLING.id == 2 + assert SportType.CYCLING.key == "cycling" + + assert SportType.STRENGTH.id == 5 + assert SportType.STRENGTH.key == "strength_training" + + # Verify other key sport types per Garmin API (IDs verified via testing) + assert SportType.WALKING.id == 12 + assert SportType.SWIMMING.id == 4 + assert SportType.CARDIO.id == 6 + assert SportType.YOGA.id == 7 + + def test_sport_type_from_id(self): + """Test SportType.from_id lookup.""" + assert SportType.from_id(1) == SportType.RUNNING + assert SportType.from_id(2) == SportType.CYCLING + assert SportType.from_id(4) == SportType.SWIMMING + + def test_sport_type_from_id_unknown(self): + """Test SportType.from_id returns OTHER for unknown IDs.""" + assert SportType.from_id(9999) == SportType.OTHER + + def test_sport_type_from_key(self): + """Test SportType.from_key lookup.""" + assert 
SportType.from_key("running") == SportType.RUNNING + assert SportType.from_key("cycling") == SportType.CYCLING + assert SportType.from_key("RUNNING") == SportType.RUNNING # Case insensitive + + def test_sport_type_from_key_unknown(self): + """Test SportType.from_key returns OTHER for unknown keys.""" + assert SportType.from_key("unknown_sport") == SportType.OTHER + + +class TestStepType: + """Test cases for StepType enum.""" + + def test_step_type_values(self): + """Test StepType values.""" + assert StepType.WARMUP.value == "warmup" + assert StepType.COOLDOWN.value == "cooldown" + assert StepType.INTERVAL.value == "interval" + assert StepType.RECOVERY.value == "recovery" + assert StepType.REST.value == "rest" + assert StepType.REPEAT.value == "repeat" + + def test_step_type_type_id(self): + """Test StepType.type_id property.""" + assert StepType.WARMUP.type_id == 1 + assert StepType.COOLDOWN.type_id == 2 + assert StepType.INTERVAL.type_id == 3 + assert StepType.RECOVERY.type_id == 4 + assert StepType.REST.type_id == 5 + assert StepType.REPEAT.type_id == 6 + + def test_step_type_from_type_id(self): + """Test StepType.from_type_id lookup.""" + assert StepType.from_type_id(1) == StepType.WARMUP + assert StepType.from_type_id(3) == StepType.INTERVAL + assert StepType.from_type_id(6) == StepType.REPEAT + + def test_step_type_from_type_id_unknown(self): + """Test StepType.from_type_id returns OTHER for unknown IDs.""" + assert StepType.from_type_id(9999) == StepType.OTHER + + +class TestEndConditionType: + """Test cases for EndConditionType enum.""" + + def test_end_condition_type_values(self): + """Test EndConditionType values.""" + assert EndConditionType.LAP_BUTTON.value == "lap.button" + assert EndConditionType.TIME.value == "time" + assert EndConditionType.DISTANCE.value == "distance" + assert EndConditionType.ITERATIONS.value == "iterations" + + def test_end_condition_type_condition_type_id(self): + """Test EndConditionType.condition_type_id property.""" + assert 
EndConditionType.LAP_BUTTON.condition_type_id == 1 + assert EndConditionType.TIME.condition_type_id == 2 + assert EndConditionType.DISTANCE.condition_type_id == 3 + assert EndConditionType.ITERATIONS.condition_type_id == 7 + + def test_end_condition_type_from_condition_type_id(self): + """Test EndConditionType.from_condition_type_id lookup.""" + assert EndConditionType.from_condition_type_id(1) == EndConditionType.LAP_BUTTON + assert EndConditionType.from_condition_type_id(2) == EndConditionType.TIME + assert EndConditionType.from_condition_type_id(3) == EndConditionType.DISTANCE + + def test_end_condition_type_from_condition_type_id_unknown(self): + """Test EndConditionType.from_condition_type_id returns LAP_BUTTON for unknown.""" + assert ( + EndConditionType.from_condition_type_id(9999) == EndConditionType.LAP_BUTTON + ) + + +class TestTargetType: + """Test cases for TargetType enum.""" + + def test_target_type_values(self): + """Test TargetType values.""" + assert TargetType.NO_TARGET.value == "no.target" + assert TargetType.POWER_ZONE.value == "power.zone" + assert TargetType.HEART_RATE_ZONE.value == "heart.rate.zone" + assert TargetType.CADENCE_ZONE.value == "cadence.zone" + + def test_target_type_target_type_id(self): + """Test TargetType.target_type_id property.""" + assert TargetType.NO_TARGET.target_type_id == 1 + assert TargetType.POWER_ZONE.target_type_id == 2 + assert TargetType.HEART_RATE_ZONE.target_type_id == 4 + + def test_target_type_from_target_type_id(self): + """Test TargetType.from_target_type_id lookup.""" + assert TargetType.from_target_type_id(1) == TargetType.NO_TARGET + assert TargetType.from_target_type_id(2) == TargetType.POWER_ZONE + + def test_target_type_from_target_type_id_unknown(self): + """Test TargetType.from_target_type_id returns NO_TARGET for unknown.""" + assert TargetType.from_target_type_id(9999) == TargetType.NO_TARGET + + +class TestIntensityType: + """Test cases for IntensityType enum.""" + + def 
test_intensity_type_values(self): + """Test IntensityType values.""" + assert IntensityType.ACTIVE.value == "active" + assert IntensityType.REST.value == "rest" + assert IntensityType.WARMUP.value == "warmup" + assert IntensityType.COOLDOWN.value == "cooldown" + + def test_intensity_type_intensity_type_id(self): + """Test IntensityType.intensity_type_id property.""" + assert IntensityType.ACTIVE.intensity_type_id == 1 + assert IntensityType.REST.intensity_type_id == 2 + assert IntensityType.WARMUP.intensity_type_id == 3 + + def test_intensity_type_from_intensity_type_id(self): + """Test IntensityType.from_intensity_type_id lookup.""" + assert IntensityType.from_intensity_type_id(1) == IntensityType.ACTIVE + assert IntensityType.from_intensity_type_id(3) == IntensityType.WARMUP + + def test_intensity_type_from_intensity_type_id_unknown(self): + """Test IntensityType.from_intensity_type_id returns ACTIVE for unknown.""" + assert IntensityType.from_intensity_type_id(9999) == IntensityType.ACTIVE diff --git a/tests/test_workouts_exercises.py b/tests/test_workouts_exercises.py new file mode 100644 index 0000000..16a559c --- /dev/null +++ b/tests/test_workouts_exercises.py @@ -0,0 +1,990 @@ +"""Tests for garmy.workouts.exercises module.""" + +import pytest + +from garmy.workouts.exercises import ( + ALIASES, + EXERCISES, + ExerciseMatcher, + MatchResult, + get_matcher, + resolve_exercise, + search_exercises, +) + + +class TestMatchResult: + """Test cases for MatchResult dataclass.""" + + def test_match_result_creation(self): + """Test creating a MatchResult.""" + result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.95, + ) + assert result.name == "BARBELL_BENCH_PRESS" + assert result.category == "BENCH_PRESS" + assert result.score == 0.95 + assert result.alternatives == [] + + def test_match_result_with_alternatives(self): + """Test MatchResult with alternatives.""" + alternatives = [ + ("DUMBBELL_BENCH_PRESS", "BENCH_PRESS", 0.90), + 
("INCLINE_BARBELL_BENCH_PRESS", "BENCH_PRESS", 0.88), + ] + result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.95, + alternatives=alternatives, + ) + assert len(result.alternatives) == 2 + assert result.alternatives[0][0] == "DUMBBELL_BENCH_PRESS" + + def test_is_exact_property(self): + """Test is_exact property.""" + exact_result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=1.0, + ) + assert exact_result.is_exact is True + + fuzzy_result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.95, + ) + assert fuzzy_result.is_exact is False + + def test_is_confident_property(self): + """Test is_confident property.""" + confident_result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.85, + ) + assert confident_result.is_confident is True + + low_confidence_result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.75, + ) + assert low_confidence_result.is_confident is False + + def test_match_result_str(self): + """Test string representation of MatchResult.""" + result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.85, + ) + assert str(result) == "BARBELL_BENCH_PRESS (85%)" + + def test_match_result_frozen(self): + """Test that MatchResult is immutable (frozen).""" + result = MatchResult( + name="BARBELL_BENCH_PRESS", + category="BENCH_PRESS", + score=0.95, + ) + with pytest.raises(AttributeError): + result.name = "DUMBBELL_BENCH_PRESS" + + +class TestExerciseMatcherExactMatching: + """Test exact matching functionality.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_exact_match_screaming_snake_case(self, matcher): + """Test exact match with SCREAMING_SNAKE_CASE input.""" + result = matcher.resolve("BARBELL_BENCH_PRESS") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + 
assert result.category == "BENCH_PRESS" + assert result.is_exact is True + + def test_exact_match_lowercase(self, matcher): + """Test exact match with lowercase input.""" + result = matcher.resolve("barbell bench press") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + assert result.category == "BENCH_PRESS" + assert result.score >= 0.95 + + def test_exact_match_mixed_case(self, matcher): + """Test exact match with mixed case input.""" + result = matcher.resolve("Barbell Bench Press") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + assert result.category == "BENCH_PRESS" + + def test_exact_match_with_underscores(self, matcher): + """Test exact match with underscores instead of spaces.""" + result = matcher.resolve("barbell_bench_press") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + assert result.category == "BENCH_PRESS" + assert result.is_exact is True + + def test_exact_match_deadlift(self, matcher): + """Test exact match for DEADLIFT exercises.""" + result = matcher.resolve("BARBELL_DEADLIFT") + assert result is not None + assert result.name == "BARBELL_DEADLIFT" + assert result.category == "DEADLIFT" + + def test_exact_match_squat(self, matcher): + """Test exact match for SQUAT exercises.""" + result = matcher.resolve("BARBELL_BACK_SQUAT") + assert result is not None + assert result.name == "BARBELL_BACK_SQUAT" + assert result.category == "SQUAT" + + def test_exact_match_pull_up(self, matcher): + """Test exact match for PULL_UP exercises.""" + result = matcher.resolve("PULL_UP") + assert result is not None + assert result.name == "PULL_UP" + assert result.category == "PULL_UP" + + +class TestExerciseMatcherAliasMatching: + """Test alias-based matching.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_alias_bench_press(self, matcher): + """Test alias matching for 'bench press'.""" + result = 
matcher.resolve("bench press") + assert result is not None + # The alias points to BARBELL_BENCH_PRESS, but exact match for BENCH_PRESS wins + assert "BENCH_PRESS" in result.name + assert result.category == "BENCH_PRESS" + assert result.score >= 0.95 + + def test_alias_deadlift(self, matcher): + """Test alias matching for 'deadlift'.""" + result = matcher.resolve("deadlift") + assert result is not None + # The alias points to BARBELL_DEADLIFT, but exact match for DEADLIFT wins + assert "DEADLIFT" in result.name + assert result.category == "DEADLIFT" + assert result.score >= 0.95 + + def test_alias_squat(self, matcher): + """Test alias matching for 'squat'.""" + result = matcher.resolve("squat") + assert result is not None + # The alias points to BARBELL_BACK_SQUAT, but exact match for SQUAT wins + assert "SQUAT" in result.name + assert result.category == "SQUAT" + assert result.score >= 0.95 + + def test_alias_overhead_press(self, matcher): + """Test alias matching for 'overhead press'.""" + result = matcher.resolve("overhead press") + assert result is not None + assert result.category == "SHOULDER_PRESS" + assert result.score >= 0.80 + + def test_alias_curl(self, matcher): + """Test alias matching for 'curl'.""" + result = matcher.resolve("curl") + assert result is not None + assert result.name == "BARBELL_BICEPS_CURL" + assert result.category == "CURL" + + def test_alias_abbreviated_ohp(self, matcher): + """Test alias matching for 'ohp' abbreviation.""" + result = matcher.resolve("ohp") + assert result is not None + assert result.name == "OVERHEAD_PRESS" + assert result.score == 0.95 + + def test_alias_abbreviated_dl(self, matcher): + """Test alias matching for 'dl' abbreviation.""" + result = matcher.resolve("dl") + assert result is not None + assert result.name == "BARBELL_DEADLIFT" + + def test_alias_rdl(self, matcher): + """Test alias matching for 'rdl' abbreviation.""" + result = matcher.resolve("rdl") + assert result is not None + assert result.name == 
"ROMANIAN_DEADLIFT" + + def test_alias_pull_up(self, matcher): + """Test alias matching for 'pull up'.""" + result = matcher.resolve("pull up") + assert result is not None + assert result.name == "PULL_UP" + + def test_alias_push_up(self, matcher): + """Test alias matching for 'push up'.""" + result = matcher.resolve("push up") + assert result is not None + assert result.name == "PUSH_UP" + + def test_alias_dip(self, matcher): + """Test alias matching for 'dip'.""" + result = matcher.resolve("dip") + assert result is not None + assert "DIP" in result.name + assert result.category == "TRICEPS_EXTENSION" + + def test_alias_lat_pulldown(self, matcher): + """Test alias matching for 'lat pulldown'.""" + result = matcher.resolve("lat pulldown") + assert result is not None + assert result.name == "LAT_PULLDOWN" + + def test_alias_hamstring_curl(self, matcher): + """Test alias matching for 'hamstring curl'.""" + result = matcher.resolve("hamstring curl") + assert result is not None + assert "HAMSTRING" in result.name or "LEG_CURL" in result.name + + def test_alias_kettlebell_swing(self, matcher): + """Test alias matching for 'kettlebell swing'.""" + result = matcher.resolve("kettlebell swing") + assert result is not None + assert result.name == "KETTLEBELL_SWING" + + def test_alias_case_insensitive(self, matcher): + """Test that alias matching is case-insensitive.""" + result = matcher.resolve("BENCH PRESS") + assert result is not None + assert "BENCH_PRESS" in result.name + + +class TestExerciseMatcherFuzzyMatching: + """Test fuzzy matching with typos.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_typo_dumbel_curl(self, matcher): + """Test fuzzy matching with typo 'dumbel' instead of 'dumbbell'.""" + result = matcher.resolve("dumbel curl") + assert result is not None + # Should match something with curl + assert "CURL" in result.name or result.category == "CURL" + + def 
test_typo_bench_pres(self, matcher): + """Test fuzzy matching with typo 'pres' instead of 'press'.""" + result = matcher.resolve("bench pres") + assert result is not None + assert "BENCH_PRESS" in result.name + + def test_typo_deadlift_variation(self, matcher): + """Test fuzzy matching with typo in deadlift.""" + result = matcher.resolve("deadlift") + assert result is not None + assert "DEADLIFT" in result.name + + def test_single_character_typo_squat(self, matcher): + """Test fuzzy matching with single character typo.""" + result = matcher.resolve("sqauat") + assert result is not None + assert "SQUAT" in result.name + + def test_transposed_characters(self, matcher): + """Test fuzzy matching with transposed characters.""" + result = matcher.resolve("barbell benchpress") + assert result is not None + assert "BENCH_PRESS" in result.name + + def test_missing_word_fuzzy(self, matcher): + """Test fuzzy matching with missing word.""" + result = matcher.resolve("bench") + assert result is not None + assert "BENCH_PRESS" in result.name + + def test_extra_word_fuzzy(self, matcher): + """Test fuzzy matching with extra word.""" + result = matcher.resolve("heavy barbell bench press") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + + +class TestEquipmentHints: + """Test equipment hint functionality.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_equipment_hint_dumbbell_curl(self, matcher): + """Test equipment hint prefers DUMBBELL exercises.""" + result = matcher.resolve("curl", equipment_hint="dumbbell") + assert result is not None + # Without hint, would be BARBELL_BICEPS_CURL + # With hint, should prefer a dumbbell variant + assert result.score >= 0.75 + + def test_equipment_hint_barbell_row(self, matcher): + """Test equipment hint prefers BARBELL exercises.""" + result = matcher.resolve("row", equipment_hint="barbell") + assert result is not None + assert 
result.score >= 0.75 + + def test_equipment_hint_kettlebell_swing(self, matcher): + """Test equipment hint with kettlebell.""" + result = matcher.resolve("swing", equipment_hint="kettlebell") + assert result is not None + # Should find some swing-related exercise + assert "SWING" in result.name + + def test_equipment_hint_cable(self, matcher): + """Test equipment hint with cable.""" + result = matcher.resolve("row", equipment_hint="cable") + assert result is not None + assert result.score >= 0.75 + + def test_equipment_hint_case_insensitive(self, matcher): + """Test that equipment hint is case-insensitive.""" + result = matcher.resolve("curl", equipment_hint="DUMBBELL") + assert result is not None + assert result.score >= 0.75 + + def test_equipment_hint_with_exact_match(self, matcher): + """Test equipment hint doesn't break exact matches.""" + result = matcher.resolve("BARBELL_BENCH_PRESS", equipment_hint="dumbbell") + assert result is not None + assert result.name == "BARBELL_BENCH_PRESS" + assert result.is_exact is True + + +class TestSearchFunctionality: + """Test search functionality.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_search_basic(self, matcher): + """Test basic search returns results.""" + results = matcher.search("bench press") + assert isinstance(results, list) + assert len(results) > 0 + assert all(isinstance(r, MatchResult) for r in results) + + def test_search_returns_ranked_results(self, matcher): + """Test search returns results ranked by score.""" + results = matcher.search("bench press", limit=10) + assert len(results) > 0 + # Results should be sorted by score descending + scores = [r.score for r in results] + assert scores == sorted(scores, reverse=True) + + def test_search_limit_parameter(self, matcher): + """Test search respects limit parameter.""" + results = matcher.search("press", limit=5) + assert len(results) <= 5 + + def 
test_search_limit_default(self, matcher): + """Test search default limit is 10.""" + results = matcher.search("bench") + assert len(results) <= 10 + + def test_search_all_results_have_positive_score(self, matcher): + """Test all search results have positive scores.""" + results = matcher.search("curl", limit=10) + assert all(r.score > 0 for r in results) + + def test_search_squat(self, matcher): + """Test search for 'squat' returns multiple variants.""" + results = matcher.search("squat", limit=10) + assert len(results) > 0 + assert any("SQUAT" in r.name for r in results) + + def test_search_deadlift(self, matcher): + """Test search for 'deadlift' returns variants.""" + results = matcher.search("deadlift", limit=5) + assert len(results) > 0 + assert any("DEADLIFT" in r.name for r in results) + + def test_search_empty_query(self, matcher): + """Test search with empty query returns empty list.""" + results = matcher.search("") + assert results == [] + + def test_search_whitespace_only(self, matcher): + """Test search with whitespace-only query returns empty list.""" + results = matcher.search(" ") + assert results == [] + + def test_search_includes_high_score_results(self, matcher): + """Test search includes exact and high-confidence matches first.""" + results = matcher.search("barbell bench press", limit=5) + assert len(results) > 0 + # First result should be highly confident + assert results[0].is_confident + + +class TestCategoryFunctions: + """Test category-related functions.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_list_categories(self, matcher): + """Test list_categories returns non-empty list.""" + categories = matcher.list_categories() + assert isinstance(categories, list) + assert len(categories) > 0 + # Should be sorted + assert categories == sorted(categories) + + def test_list_categories_contains_common_categories(self, matcher): + """Test list_categories includes 
common exercise categories.""" + categories = matcher.list_categories() + assert "BENCH_PRESS" in categories + assert "DEADLIFT" in categories + assert "SQUAT" in categories + assert "CURL" in categories + assert "ROW" in categories + assert "PULL_UP" in categories + assert "PUSH_UP" in categories + + def test_get_category_exact_name(self, matcher): + """Test get_category with exact exercise name.""" + category = matcher.get_category("BARBELL_BENCH_PRESS") + assert category == "BENCH_PRESS" + + def test_get_category_normalized_name(self, matcher): + """Test get_category with normalized exercise name.""" + category = matcher.get_category("barbell bench press") + assert category == "BENCH_PRESS" + + def test_get_category_not_found(self, matcher): + """Test get_category returns None for unknown exercise.""" + category = matcher.get_category("NONEXISTENT_EXERCISE") + assert category is None + + def test_list_by_category_bench_press(self, matcher): + """Test list_by_category for BENCH_PRESS.""" + exercises = matcher.list_by_category("BENCH_PRESS") + assert isinstance(exercises, list) + assert len(exercises) > 0 + # All should contain BENCH_PRESS or related terms + assert "BARBELL_BENCH_PRESS" in exercises + assert all(isinstance(e, str) for e in exercises) + # Should be sorted + assert exercises == sorted(exercises) + + def test_list_by_category_case_insensitive(self, matcher): + """Test list_by_category is case-insensitive.""" + exercises_upper = matcher.list_by_category("BENCH_PRESS") + exercises_lower = matcher.list_by_category("bench_press") + assert exercises_upper == exercises_lower + + def test_list_by_category_all_returned_are_valid(self, matcher): + """Test all returned exercises belong to the category.""" + exercises = matcher.list_by_category("DEADLIFT") + for exercise in exercises: + assert EXERCISES[exercise] == "DEADLIFT" + + def test_list_by_category_deadlift(self, matcher): + """Test list_by_category for DEADLIFT.""" + exercises = 
matcher.list_by_category("DEADLIFT") + assert len(exercises) > 0 + assert "BARBELL_DEADLIFT" in exercises + + def test_list_by_category_curl(self, matcher): + """Test list_by_category for CURL.""" + exercises = matcher.list_by_category("CURL") + assert len(exercises) > 0 + assert "BARBELL_BICEPS_CURL" in exercises + + +class TestEquipmentFiltering: + """Test equipment filtering functionality.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_list_by_equipment_dumbbell(self, matcher): + """Test list_by_equipment for DUMBBELL.""" + exercises = matcher.list_by_equipment("DUMBBELL") + assert isinstance(exercises, list) + assert len(exercises) > 0 + # All should start with DUMBBELL + assert all(e.startswith("DUMBBELL") for e in exercises) + assert "DUMBBELL_BENCH_PRESS" in exercises + + def test_list_by_equipment_barbell(self, matcher): + """Test list_by_equipment for BARBELL.""" + exercises = matcher.list_by_equipment("BARBELL") + assert len(exercises) > 0 + assert all(e.startswith("BARBELL") for e in exercises) + assert "BARBELL_BENCH_PRESS" in exercises + + def test_list_by_equipment_kettlebell(self, matcher): + """Test list_by_equipment for KETTLEBELL.""" + exercises = matcher.list_by_equipment("KETTLEBELL") + assert len(exercises) > 0 + assert all(e.startswith("KETTLEBELL") for e in exercises) + + def test_list_by_equipment_cable(self, matcher): + """Test list_by_equipment for CABLE.""" + exercises = matcher.list_by_equipment("CABLE") + assert len(exercises) > 0 + assert all(e.startswith("CABLE") for e in exercises) + + def test_list_by_equipment_case_insensitive(self, matcher): + """Test list_by_equipment is case-insensitive.""" + exercises_upper = matcher.list_by_equipment("DUMBBELL") + exercises_lower = matcher.list_by_equipment("dumbbell") + assert exercises_upper == exercises_lower + + def test_list_by_equipment_sorted(self, matcher): + """Test list_by_equipment returns sorted 
results.""" + exercises = matcher.list_by_equipment("DUMBBELL") + assert exercises == sorted(exercises) + + +class TestEdgeCases: + """Test edge cases and error conditions.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_empty_input_returns_none(self, matcher): + """Test empty input returns None.""" + result = matcher.resolve("") + assert result is None + + def test_whitespace_only_returns_none(self, matcher): + """Test whitespace-only input returns None.""" + result = matcher.resolve(" ") + assert result is None + result = matcher.resolve("\t\n") + assert result is None + + def test_gibberish_input_returns_none_or_low_score(self, matcher): + """Test gibberish input returns None or below threshold.""" + result = matcher.resolve("xyzabc123def456") + # Should either return None or have very low score + if result is not None: + assert result.score < 0.5 + + def test_resolve_or_raise_with_match(self, matcher): + """Test resolve_or_raise returns result when match found.""" + result = matcher.resolve_or_raise("bench press") + assert isinstance(result, MatchResult) + assert "BENCH_PRESS" in result.name + + def test_resolve_or_raise_without_match_raises(self, matcher): + """Test resolve_or_raise raises ValueError when no match.""" + with pytest.raises(ValueError) as exc_info: + matcher.resolve_or_raise("xyzabc123") + assert "No match found" in str(exc_info.value) + + def test_resolve_or_raise_includes_suggestions(self, matcher): + """Test resolve_or_raise exception includes suggestions.""" + with pytest.raises(ValueError) as exc_info: + matcher.resolve_or_raise("xyzabc123garbagetext") # Gibberish that won't match + error_msg = str(exc_info.value) + assert "No match found" in error_msg + + def test_very_long_input(self, matcher): + """Test handling of very long input.""" + long_input = "bench press " * 100 + result = matcher.resolve(long_input) + # Should still work or return None gracefully + 
assert result is None or isinstance(result, MatchResult) + + def test_special_characters_stripped(self, matcher): + """Test that special characters are handled.""" + result = matcher.resolve("bench press") + assert result is not None + assert "BENCH_PRESS" in result.name + + def test_numeric_characters_preserved(self, matcher): + """Test numeric characters in input.""" + result = matcher.resolve("45_degree_plank") + # Should find the exercise if it exists + assert result is not None or isinstance(result, type(None)) + + def test_threshold_parameter(self): + """Test creating matcher with custom threshold.""" + matcher_strict = ExerciseMatcher(threshold=0.9) + matcher_lenient = ExerciseMatcher(threshold=0.3) + + # Both should find exact matches + assert matcher_strict.resolve("BARBELL_BENCH_PRESS") is not None + assert matcher_lenient.resolve("BARBELL_BENCH_PRESS") is not None + + # Lenient should find more fuzzy matches + fuzzy_results_lenient = matcher_lenient.search("bench", limit=100) + fuzzy_results_strict = matcher_strict.search("bench", limit=100) + # Lenient might find more low-scoring results + assert len(fuzzy_results_lenient) >= len(fuzzy_results_strict) + + +class TestModuleLevelFunctions: + """Test module-level convenience functions.""" + + def test_get_matcher_returns_singleton(self): + """Test get_matcher returns same instance.""" + matcher1 = get_matcher() + matcher2 = get_matcher() + assert matcher1 is matcher2 + + def test_get_matcher_returns_exercise_matcher(self): + """Test get_matcher returns ExerciseMatcher.""" + matcher = get_matcher() + assert isinstance(matcher, ExerciseMatcher) + + def test_resolve_exercise_returns_tuple(self): + """Test resolve_exercise returns (name, category) tuple.""" + name, category = resolve_exercise("bench press") + assert isinstance(name, str) + assert isinstance(category, str) + assert "BENCH_PRESS" in name + assert category == "BENCH_PRESS" + + def test_resolve_exercise_raises_on_no_match(self): + """Test 
resolve_exercise raises ValueError on no match.""" + with pytest.raises(ValueError): + resolve_exercise("xyzabc123") + + def test_resolve_exercise_with_equipment_hint(self): + """Test resolve_exercise accepts equipment_hint.""" + name, category = resolve_exercise("curl", equipment_hint="dumbbell") + assert isinstance(name, str) + assert isinstance(category, str) + + def test_search_exercises_returns_list(self): + """Test search_exercises returns list.""" + results = search_exercises("bench") + assert isinstance(results, list) + assert all(isinstance(r, MatchResult) for r in results) + + def test_search_exercises_with_limit(self): + """Test search_exercises accepts limit parameter.""" + results = search_exercises("bench", limit=5) + assert len(results) <= 5 + + def test_search_exercises_empty_query(self): + """Test search_exercises with empty query.""" + results = search_exercises("") + assert results == [] + + +class TestConsistency: + """Test consistency across different matching methods.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_resolve_vs_search_consistency(self, matcher): + """Test that resolve and search return consistent results.""" + # For a given query, resolve should return top search result + query = "bench press" + resolve_result = matcher.resolve(query) + search_results = matcher.search(query, limit=1) + + if resolve_result and search_results: + assert resolve_result.name == search_results[0].name + + def test_get_category_matches_exercises_db(self, matcher): + """Test get_category matches EXERCISES database.""" + for exercise_name, expected_category in list(EXERCISES.items())[:50]: + category = matcher.get_category(exercise_name) + assert category == expected_category + + def test_list_by_category_consistency(self, matcher): + """Test list_by_category is consistent.""" + categories = matcher.list_categories() + for category in categories: + exercises = 
matcher.list_by_category(category) + # All returned exercises should belong to this category + for exercise in exercises: + assert EXERCISES[exercise] == category + + def test_list_by_equipment_consistency(self, matcher): + """Test list_by_equipment is consistent.""" + for equipment in ["DUMBBELL", "BARBELL", "KETTLEBELL"]: + exercises = matcher.list_by_equipment(equipment) + for exercise in exercises: + assert exercise.startswith(equipment) + + def test_alias_consistency(self, matcher): + """Test all aliases map to valid exercises.""" + for alias, target in list(ALIASES.items())[:10]: # Test first 10 aliases + if target in EXERCISES: + # Should be able to resolve this alias + result = matcher.resolve(alias) + assert result is not None + # The result should be valid + assert result.name in EXERCISES + + +class TestPerformance: + """Test performance characteristics.""" + + @pytest.fixture + def matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_resolve_completes_quickly(self, matcher): + """Test resolve completes in reasonable time.""" + import time + + start = time.time() + for _ in range(100): + matcher.resolve("bench press") + elapsed = time.time() - start + # Should complete 100 queries in less than 1 second + assert elapsed < 1.0 + + def test_search_completes_quickly(self, matcher): + """Test search completes in reasonable time.""" + import time + + start = time.time() + for _ in range(20): + matcher.search("bench", limit=10) + elapsed = time.time() - start + # Should complete 20 searches in less than 2 seconds + assert elapsed < 2.0 + + def test_list_categories_completes_quickly(self, matcher): + """Test list_categories completes quickly.""" + import time + + start = time.time() + for _ in range(100): + matcher.list_categories() + elapsed = time.time() - start + # Should be very fast + assert elapsed < 0.1 + + +class TestRegressions: + """Test specific exercise matching scenarios.""" + + @pytest.fixture + def 
matcher(self): + """Create an ExerciseMatcher instance.""" + return ExerciseMatcher() + + def test_face_pull_resolves(self, matcher): + """Test face pull resolves correctly.""" + result = matcher.resolve("face pull") + assert result is not None + assert "FACE_PULL" in result.name + + def test_abs_wheel_resolves(self, matcher): + """Test ab wheel resolves correctly.""" + result = matcher.resolve("ab wheel") + assert result is not None + assert result.name == "AB_WHEEL_ROLLOUT" + + def test_goblet_squat_resolves(self, matcher): + """Test goblet squat resolves correctly.""" + result = matcher.resolve("goblet squat") + assert result is not None + assert result.name == "GOBLET_SQUAT" + + def test_hack_squat_resolves(self, matcher): + """Test hack squat resolves correctly.""" + result = matcher.resolve("hack squat") + assert result is not None + assert "HACK_SQUAT" in result.name + + def test_leg_press_resolves(self, matcher): + """Test leg press resolves correctly.""" + result = matcher.resolve("leg press") + assert result is not None + assert result.name == "LEG_PRESS" + + def test_leg_extension_resolves(self, matcher): + """Test leg extension resolves correctly.""" + result = matcher.resolve("leg extension") + assert result is not None + assert result.name == "LEG_EXTENSIONS" + + def test_nordic_curl_resolves(self, matcher): + """Test nordic hamstring curl resolves.""" + result = matcher.resolve("nordic hamstring curl") + assert result is not None + assert "NORDIC" in result.name or "HAMSTRING" in result.name + + def test_pallof_press_resolves(self, matcher): + """Test pallof press resolves correctly.""" + result = matcher.resolve("pallof press") + assert result is not None + assert result.name == "PALLOF_PRESS" + + def test_woodchop_resolves(self, matcher): + """Test woodchop resolves correctly.""" + result = matcher.resolve("woodchop") + assert result is not None + assert "WOODCHOP" in result.name or "CHOP" in result.name + + def test_hollow_body_hold_resolves(self, 
matcher): + """Test hollow body hold resolves correctly.""" + result = matcher.resolve("hollow body hold") + assert result is not None + assert result.name == "HOLLOW_BODY_HOLD" + + def test_arnold_press_resolves(self, matcher): + """Test arnold press resolves correctly.""" + result = matcher.resolve("arnold press") + assert result is not None + assert result.name == "ARNOLD_PRESS" + + def test_battle_rope_resolves(self, matcher): + """Test battle rope resolves correctly.""" + result = matcher.resolve("battle rope") + assert result is not None + assert "BATTLE_ROPE" in result.name + + +class TestDataIntegrity: + """Test that the exercise database is properly configured.""" + + def test_exercises_dict_not_empty(self): + """Test EXERCISES dict is not empty.""" + assert len(EXERCISES) > 0 + + def test_aliases_dict_not_empty(self): + """Test ALIASES dict is not empty.""" + assert len(ALIASES) > 0 + + def test_all_exercise_names_are_strings(self): + """Test all exercise names are strings.""" + assert all(isinstance(k, str) for k in EXERCISES.keys()) + + def test_all_categories_are_strings(self): + """Test all categories are strings.""" + assert all(isinstance(v, str) for v in EXERCISES.values()) + + def test_all_aliases_point_to_valid_exercises(self): + """Test all aliases point to exercises in EXERCISES.""" + for alias, target in ALIASES.items(): + # Target should be in EXERCISES or be transformable to it + assert isinstance(target, str) + + def test_exercises_have_reasonable_count(self): + """Test EXERCISES dict has a reasonable number of entries.""" + # Should have at least 100 exercises + assert len(EXERCISES) > 100 + + def test_categories_are_uppercase(self): + """Test all category names follow SCREAMING_SNAKE_CASE.""" + categories = set(EXERCISES.values()) + for category in categories: + assert category == category.upper() + assert "_" in category or len(category) < 20 + + +class TestNormalization: + """Test text normalization logic.""" + + def 
test_normalize_removes_special_chars(self): + """Test normalization removes special characters.""" + matcher = ExerciseMatcher() + normalized = matcher._normalize("bench-press!") + assert "!" not in normalized + assert "-" not in normalized + + def test_normalize_lowercase(self): + """Test normalization converts to lowercase.""" + matcher = ExerciseMatcher() + normalized = matcher._normalize("BARBELL_BENCH_PRESS") + assert normalized == normalized.lower() + + def test_normalize_spaces_to_underscores(self): + """Test normalization replaces spaces with underscores.""" + matcher = ExerciseMatcher() + normalized = matcher._normalize("barbell bench press") + assert " " not in normalized + assert normalized == "barbell_bench_press" + + def test_normalize_strips_whitespace(self): + """Test normalization strips leading/trailing whitespace.""" + matcher = ExerciseMatcher() + normalized = matcher._normalize(" bench press ") + assert normalized == "bench_press" + assert not normalized.startswith(" ") + assert not normalized.endswith(" ") + + +class TestTokenization: + """Test tokenization logic.""" + + def test_tokenize_splits_by_underscores(self): + """Test tokenization splits by underscores.""" + matcher = ExerciseMatcher() + tokens = matcher._tokenize("barbell_bench_press") + assert "barbell" in tokens + assert "bench" in tokens + assert "press" in tokens + + def test_tokenize_splits_by_spaces(self): + """Test tokenization splits by spaces.""" + matcher = ExerciseMatcher() + tokens = matcher._tokenize("barbell bench press") + assert len(tokens) == 3 + + def test_tokenize_returns_set(self): + """Test tokenization returns a set.""" + matcher = ExerciseMatcher() + tokens = matcher._tokenize("bench press") + assert isinstance(tokens, set) + + +class TestLevenshteinSimilarity: + """Test Levenshtein similarity calculation.""" + + def test_levenshtein_exact_match(self): + """Test Levenshtein distance for identical strings.""" + matcher = ExerciseMatcher() + ratio = 
matcher._levenshtein_ratio("bench", "bench") + assert ratio == 1.0 + + def test_levenshtein_completely_different(self): + """Test Levenshtein distance for completely different strings.""" + matcher = ExerciseMatcher() + ratio = matcher._levenshtein_ratio("abc", "xyz") + assert ratio < 0.5 + + def test_levenshtein_single_char_difference(self): + """Test Levenshtein distance with single character difference.""" + matcher = ExerciseMatcher() + ratio = matcher._levenshtein_ratio("bench", "bencH") + assert ratio >= 0.8 + + def test_levenshtein_typo_dumbbell(self): + """Test Levenshtein distance for common typo.""" + matcher = ExerciseMatcher() + ratio = matcher._levenshtein_ratio("dumbbell", "dumbel") + assert ratio >= 0.7 diff --git a/tests/test_workouts_models.py b/tests/test_workouts_models.py new file mode 100644 index 0000000..390e6eb --- /dev/null +++ b/tests/test_workouts_models.py @@ -0,0 +1,281 @@ +"""Tests for garmy.workouts.models module.""" + +import pytest + +from garmy.workouts.constants import ( + EndConditionType, + IntensityType, + SportType, + StepType, + TargetType, +) +from garmy.workouts.models import ( + EndCondition, + RepeatGroup, + Target, + Workout, + WorkoutStep, +) + + +class TestEndCondition: + """Test cases for EndCondition dataclass.""" + + def test_end_condition_default(self): + """Test EndCondition defaults to lap button.""" + condition = EndCondition() + assert condition.condition_type == EndConditionType.LAP_BUTTON + assert condition.value is None + + def test_end_condition_time(self): + """Test EndCondition.time factory method.""" + condition = EndCondition.time(300) + assert condition.condition_type == EndConditionType.TIME + assert condition.value == 300 + + def test_end_condition_time_minutes(self): + """Test EndCondition.time_minutes factory method.""" + condition = EndCondition.time_minutes(5) + assert condition.condition_type == EndConditionType.TIME + assert condition.value == 300 # 5 minutes = 300 seconds + + def 
test_end_condition_distance(self): + """Test EndCondition.distance factory method.""" + condition = EndCondition.distance(1000) + assert condition.condition_type == EndConditionType.DISTANCE + assert condition.value == 1000 + + def test_end_condition_distance_km(self): + """Test EndCondition.distance_km factory method.""" + condition = EndCondition.distance_km(5) + assert condition.condition_type == EndConditionType.DISTANCE + assert condition.value == 5000 # 5 km = 5000 meters + + def test_end_condition_distance_miles(self): + """Test EndCondition.distance_miles factory method.""" + condition = EndCondition.distance_miles(1) + assert condition.condition_type == EndConditionType.DISTANCE + assert condition.value == pytest.approx(1609.344, rel=0.01) + + def test_end_condition_lap_button(self): + """Test EndCondition.lap_button factory method.""" + condition = EndCondition.lap_button() + assert condition.condition_type == EndConditionType.LAP_BUTTON + assert condition.value is None + + def test_end_condition_iterations(self): + """Test EndCondition.iterations factory method.""" + condition = EndCondition.iterations(3) + assert condition.condition_type == EndConditionType.ITERATIONS + assert condition.value == 3.0 + + def test_end_condition_reps(self): + """Test EndCondition.reps factory method.""" + condition = EndCondition.reps(10) + assert condition.condition_type == EndConditionType.REPS + assert condition.value == 10.0 + + +class TestTarget: + """Test cases for Target dataclass.""" + + def test_target_default(self): + """Test Target defaults to no target.""" + target = Target() + assert target.target_type == TargetType.NO_TARGET + assert target.value_low is None + assert target.value_high is None + assert target.zone_number is None + + def test_target_no_target(self): + """Test Target.no_target factory method.""" + target = Target.no_target() + assert target.target_type == TargetType.NO_TARGET + + def test_target_power_zone(self): + """Test Target.power_zone 
factory method.""" + target = Target.power_zone(88, 93) + assert target.target_type == TargetType.POWER_ZONE + assert target.value_low == 88 + assert target.value_high == 93 + + def test_target_heart_rate_zone(self): + """Test Target.heart_rate_zone factory method.""" + target = Target.heart_rate_zone(70, 80) + assert target.target_type == TargetType.HEART_RATE_ZONE + assert target.value_low == 70 + assert target.value_high == 80 + + def test_target_cadence_zone(self): + """Test Target.cadence_zone factory method.""" + target = Target.cadence_zone(85, 95) + assert target.target_type == TargetType.CADENCE_ZONE + assert target.value_low == 85 + assert target.value_high == 95 + + def test_target_pace_zone(self): + """Test Target.pace_zone factory method.""" + target = Target.pace_zone(240, 270) # 4:00-4:30 min/km + assert target.target_type == TargetType.PACE_ZONE + assert target.value_low == 240 + assert target.value_high == 270 + + +class TestWorkoutStep: + """Test cases for WorkoutStep dataclass.""" + + def test_workout_step_default(self): + """Test WorkoutStep default values.""" + step = WorkoutStep() + assert step.step_type == StepType.OTHER + assert step.end_condition.condition_type == EndConditionType.LAP_BUTTON + assert step.target.target_type == TargetType.NO_TARGET + assert step.description is None + assert step.step_order is None + + def test_workout_step_with_values(self): + """Test WorkoutStep with custom values.""" + step = WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + target=Target.power_zone(90, 95), + description="Hard interval", + ) + assert step.step_type == StepType.INTERVAL + assert step.end_condition.value == 300 + assert step.target.value_low == 90 + assert step.description == "Hard interval" + + def test_workout_step_intensity_auto_set(self): + """Test WorkoutStep auto-sets intensity based on step type.""" + warmup = WorkoutStep(step_type=StepType.WARMUP) + assert warmup.intensity == 
IntensityType.WARMUP + + cooldown = WorkoutStep(step_type=StepType.COOLDOWN) + assert cooldown.intensity == IntensityType.COOLDOWN + + recovery = WorkoutStep(step_type=StepType.RECOVERY) + assert recovery.intensity == IntensityType.RECOVERY + + +class TestRepeatGroup: + """Test cases for RepeatGroup dataclass.""" + + def test_repeat_group_default(self): + """Test RepeatGroup default values.""" + group = RepeatGroup() + assert group.iterations == 1 + assert group.steps == [] + assert group.step_order is None + + def test_repeat_group_with_iterations(self): + """Test RepeatGroup with custom iterations.""" + group = RepeatGroup(iterations=5) + assert group.iterations == 5 + + def test_repeat_group_add_step(self): + """Test RepeatGroup.add_step method.""" + group = RepeatGroup(iterations=3) + step = WorkoutStep(step_type=StepType.INTERVAL) + + result = group.add_step(step) + + assert result is group # Returns self for chaining + assert len(group.steps) == 1 + assert group.steps[0] == step + + def test_repeat_group_multiple_steps(self): + """Test RepeatGroup with multiple steps.""" + group = RepeatGroup(iterations=2) + interval = WorkoutStep(step_type=StepType.INTERVAL) + recovery = WorkoutStep(step_type=StepType.RECOVERY) + + group.add_step(interval).add_step(recovery) + + assert len(group.steps) == 2 + assert group.steps[0].step_type == StepType.INTERVAL + assert group.steps[1].step_type == StepType.RECOVERY + + +class TestWorkout: + """Test cases for Workout dataclass.""" + + def test_workout_default(self): + """Test Workout default values.""" + workout = Workout(name="Test Workout") + assert workout.name == "Test Workout" + assert workout.sport_type == SportType.CYCLING + assert workout.description is None + assert workout.steps == [] + assert workout.workout_id is None + assert workout.owner_id is None + + def test_workout_with_values(self): + """Test Workout with custom values.""" + workout = Workout( + name="My Workout", + sport_type=SportType.RUNNING, + 
description="A great workout", + workout_id=12345, + ) + assert workout.name == "My Workout" + assert workout.sport_type == SportType.RUNNING + assert workout.description == "A great workout" + assert workout.workout_id == 12345 + + def test_workout_add_step(self): + """Test Workout.add_step method.""" + workout = Workout(name="Test") + step = WorkoutStep(step_type=StepType.WARMUP) + + result = workout.add_step(step) + + assert result is workout # Returns self for chaining + assert len(workout.steps) == 1 + assert workout.steps[0] == step + + def test_workout_str(self): + """Test Workout string representation.""" + workout = Workout( + name="Test Workout", + sport_type=SportType.CYCLING, + description="Test description", + ) + workout.add_step(WorkoutStep(step_type=StepType.WARMUP)) + workout.add_step(WorkoutStep(step_type=StepType.INTERVAL)) + + result = str(workout) + + assert "Test Workout" in result + assert "cycling" in result + + def test_workout_to_dict(self): + """Test Workout.to_dict method.""" + workout = Workout( + name="Test Workout", + sport_type=SportType.RUNNING, + description="Test", + workout_id=123, + ) + + result = workout.to_dict() + + assert result["name"] == "Test Workout" + assert result["sport_type"] == "running" + assert result["description"] == "Test" + assert result["workout_id"] == 123 + + def test_workout_with_repeat_group(self): + """Test Workout with repeat groups affects step count calculation.""" + workout = Workout(name="Test") + workout.add_step(WorkoutStep(step_type=StepType.WARMUP)) + + repeat = RepeatGroup(iterations=3) + repeat.add_step(WorkoutStep(step_type=StepType.INTERVAL)) + repeat.add_step(WorkoutStep(step_type=StepType.RECOVERY)) + workout.add_step(repeat) + + workout.add_step(WorkoutStep(step_type=StepType.COOLDOWN)) + + # 3 top-level items: warmup, repeat, cooldown + assert len(workout.steps) == 3 diff --git a/tests/test_workouts_serializer.py b/tests/test_workouts_serializer.py new file mode 100644 index 
0000000..c8622cd --- /dev/null +++ b/tests/test_workouts_serializer.py @@ -0,0 +1,410 @@ +"""Tests for garmy.workouts.serializer module.""" + +import pytest + +from garmy.workouts.constants import ( + EndConditionType, + IntensityType, + SportType, + StepType, + TargetType, +) +from garmy.workouts.models import ( + EndCondition, + RepeatGroup, + Target, + Workout, + WorkoutStep, +) +from garmy.workouts.serializer import WorkoutSerializer + + +class TestWorkoutSerializerToApiFormat: + """Test cases for WorkoutSerializer.to_api_format.""" + + def test_basic_workout(self): + """Test serializing a basic workout.""" + workout = Workout( + name="Test Workout", + sport_type=SportType.CYCLING, + description="Test description", + ) + warmup = WorkoutStep( + step_type=StepType.WARMUP, + end_condition=EndCondition.time_minutes(10), + ) + workout.add_step(warmup) + + result = WorkoutSerializer.to_api_format(workout) + + assert result["workoutName"] == "Test Workout" + assert result["description"] == "Test description" + # Note: We only use sportTypeKey (not ID) because Garmin's IDs are inconsistent + assert result["sportType"]["sportTypeKey"] == "cycling" + assert len(result["workoutSegments"]) == 1 + assert len(result["workoutSegments"][0]["workoutSteps"]) == 1 + + def test_step_serialization(self): + """Test step serialization details.""" + workout = Workout(name="Test") + step = WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time(300), + target=Target.power_zone(88, 93), + description="Main set", + ) + workout.add_step(step) + + result = WorkoutSerializer.to_api_format(workout) + step_data = result["workoutSegments"][0]["workoutSteps"][0] + + assert step_data["type"] == "ExecutableStepDTO" + assert step_data["stepOrder"] == 1 + assert step_data["stepType"]["stepTypeId"] == 3 # INTERVAL + assert step_data["stepType"]["stepTypeKey"] == "interval" + assert step_data["endCondition"]["conditionTypeId"] == 2 # TIME + # endConditionValue is at step level 
(not inside endCondition) + assert step_data["endConditionValue"] == 300 + # Garmin uses workoutTargetTypeId (not targetTypeId) + assert step_data["targetType"]["workoutTargetTypeId"] == 2 # POWER_ZONE + assert step_data["targetType"]["targetValueOne"] == 88 + assert step_data["targetType"]["targetValueTwo"] == 93 + assert step_data["description"] == "Main set" + + def test_repeat_group_serialization(self): + """Test repeat group serialization.""" + workout = Workout(name="Test") + repeat = RepeatGroup(iterations=3) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + ) + ) + repeat.add_step( + WorkoutStep( + step_type=StepType.RECOVERY, + end_condition=EndCondition.time_minutes(2), + ) + ) + workout.add_step(repeat) + + result = WorkoutSerializer.to_api_format(workout) + repeat_data = result["workoutSegments"][0]["workoutSteps"][0] + + assert repeat_data["type"] == "RepeatGroupDTO" + assert repeat_data["stepOrder"] == 1 + # Garmin uses numberOfIterations for repeat groups + assert repeat_data["numberOfIterations"] == 3 + assert repeat_data["endConditionValue"] == 3.0 + assert len(repeat_data["workoutSteps"]) == 2 + + def test_step_ordering(self): + """Test steps have correct ordering.""" + workout = Workout(name="Test") + workout.add_step(WorkoutStep(step_type=StepType.WARMUP)) + workout.add_step(WorkoutStep(step_type=StepType.INTERVAL)) + workout.add_step(WorkoutStep(step_type=StepType.COOLDOWN)) + + result = WorkoutSerializer.to_api_format(workout) + steps = result["workoutSegments"][0]["workoutSteps"] + + assert steps[0]["stepOrder"] == 1 + assert steps[1]["stepOrder"] == 2 + assert steps[2]["stepOrder"] == 3 + + def test_workout_with_id(self): + """Test serializing workout with existing ID.""" + workout = Workout( + name="Test", + workout_id=12345, + owner_id=67890, + ) + + result = WorkoutSerializer.to_api_format(workout) + + assert result["workoutId"] == 12345 + assert result["ownerId"] == 67890 + + 
def test_no_target_serialization(self): + """Test step with no target.""" + workout = Workout(name="Test") + step = WorkoutStep( + step_type=StepType.REST, + target=Target.no_target(), + ) + workout.add_step(step) + + result = WorkoutSerializer.to_api_format(workout) + step_data = result["workoutSegments"][0]["workoutSteps"][0] + + # Garmin uses workoutTargetTypeId (not targetTypeId) + assert step_data["targetType"]["workoutTargetTypeId"] == 1 # NO_TARGET + + def test_lap_button_serialization(self): + """Test step with lap button end condition.""" + workout = Workout(name="Test") + step = WorkoutStep( + step_type=StepType.WARMUP, + end_condition=EndCondition.lap_button(), + ) + workout.add_step(step) + + result = WorkoutSerializer.to_api_format(workout) + step_data = result["workoutSegments"][0]["workoutSteps"][0] + + assert step_data["endCondition"]["conditionTypeId"] == 1 # LAP_BUTTON + assert "conditionValue" not in step_data["endCondition"] + + +class TestWorkoutSerializerFromApiFormat: + """Test cases for WorkoutSerializer.from_api_format.""" + + def test_basic_workout_parsing(self): + """Test parsing a basic workout from API format.""" + api_data = { + "workoutId": 12345, + "workoutName": "Test Workout", + "description": "Test description", + "sportType": { + "sportTypeId": 2, + "sportTypeKey": "cycling", + }, + "ownerId": 67890, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSteps": [], + } + ], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + assert workout.workout_id == 12345 + assert workout.name == "Test Workout" + assert workout.description == "Test description" + assert workout.sport_type == SportType.CYCLING + assert workout.owner_id == 67890 + + def test_step_parsing(self): + """Test parsing steps from API format.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [ + { + "segmentOrder": 
1, + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepOrder": 1, + "stepType": { + "stepTypeId": 1, + "stepTypeKey": "warmup", + }, + "intensityType": { + "intensityTypeId": 3, + "intensityTypeKey": "warmup", + }, + "endCondition": { + "conditionTypeId": 2, + "conditionTypeKey": "time", + "conditionValue": 600, + }, + "targetType": { + "targetTypeId": 2, + "targetTypeKey": "power.zone", + "targetValueLow": 50, + "targetValueHigh": 60, + }, + "description": "Easy warmup", + } + ], + } + ], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + assert len(workout.steps) == 1 + step = workout.steps[0] + assert step.step_type == StepType.WARMUP + assert step.end_condition.condition_type == EndConditionType.TIME + assert step.end_condition.value == 600 + assert step.target.target_type == TargetType.POWER_ZONE + assert step.target.value_low == 50 + assert step.target.value_high == 60 + assert step.description == "Easy warmup" + + def test_repeat_group_parsing(self): + """Test parsing repeat groups from API format.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSteps": [ + { + "type": "RepeatGroupDTO", + "stepOrder": 1, + "endCondition": { + "conditionTypeId": 7, + "conditionTypeKey": "iterations", + "conditionValue": 3, + }, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepOrder": 1, + "stepType": { + "stepTypeId": 3, + "stepTypeKey": "interval", + }, + "intensityType": { + "intensityTypeId": 6, + "intensityTypeKey": "interval", + }, + "endCondition": { + "conditionTypeId": 2, + "conditionValue": 300, + }, + "targetType": {"targetTypeId": 1}, + }, + { + "type": "ExecutableStepDTO", + "stepOrder": 2, + "stepType": { + "stepTypeId": 4, + "stepTypeKey": "recovery", + }, + "intensityType": { + 
"intensityTypeId": 5, + "intensityTypeKey": "recovery", + }, + "endCondition": { + "conditionTypeId": 2, + "conditionValue": 120, + }, + "targetType": {"targetTypeId": 1}, + }, + ], + } + ], + } + ], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + assert len(workout.steps) == 1 + repeat = workout.steps[0] + assert isinstance(repeat, RepeatGroup) + assert repeat.iterations == 3 + assert len(repeat.steps) == 2 + assert repeat.steps[0].step_type == StepType.INTERVAL + assert repeat.steps[1].step_type == StepType.RECOVERY + + def test_unknown_sport_type(self): + """Test parsing with unknown sport type defaults to OTHER.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 9999, "sportTypeKey": "unknown"}, + "workoutSegments": [], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + assert workout.sport_type == SportType.OTHER + + def test_missing_fields(self): + """Test parsing with missing optional fields.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 2}, + "workoutSegments": [], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + assert workout.name == "Test" + assert workout.description is None + assert workout.workout_id is None + + +class TestWorkoutSerializerRoundTrip: + """Test round-trip serialization/deserialization.""" + + def test_simple_workout_round_trip(self): + """Test simple workout survives serialization round trip.""" + original = Workout( + name="Round Trip Test", + sport_type=SportType.RUNNING, + description="Testing round trip", + ) + original.add_step( + WorkoutStep( + step_type=StepType.WARMUP, + end_condition=EndCondition.time_minutes(10), + target=Target.heart_rate_zone(60, 70), + ) + ) + original.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.distance_km(5), + target=Target.power_zone(85, 90), + ) + ) + + # Serialize and deserialize + api_format = WorkoutSerializer.to_api_format(original) + restored = 
WorkoutSerializer.from_api_format(api_format) + + # Verify + assert restored.name == original.name + assert restored.sport_type == original.sport_type + assert restored.description == original.description + assert len(restored.steps) == len(original.steps) + + # Check first step + assert restored.steps[0].step_type == StepType.WARMUP + assert restored.steps[0].end_condition.value == 600 + + # Check second step + assert restored.steps[1].step_type == StepType.INTERVAL + assert restored.steps[1].end_condition.value == 5000 + + def test_repeat_group_round_trip(self): + """Test workout with repeat group survives round trip.""" + original = Workout(name="Repeat Test") + + repeat = RepeatGroup(iterations=4) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(3), + ) + ) + repeat.add_step( + WorkoutStep( + step_type=StepType.RECOVERY, + end_condition=EndCondition.time_minutes(2), + ) + ) + original.add_step(repeat) + + # Serialize and deserialize + api_format = WorkoutSerializer.to_api_format(original) + restored = WorkoutSerializer.from_api_format(api_format) + + # Verify + assert len(restored.steps) == 1 + restored_repeat = restored.steps[0] + assert isinstance(restored_repeat, RepeatGroup) + assert restored_repeat.iterations == 4 + assert len(restored_repeat.steps) == 2 From f2805daa10ab8f3ce6d34f560387805d02ddcfdc Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Fri, 16 Jan 2026 17:33:21 -0800 Subject: [PATCH 15/26] Fix workout reps end condition ID and improve MCP documentation - Fix EndConditionType.REPS ID from 8 to 10 to match Garmin API - Fix weight unit parsing to respect unit returned by API (was double-converting) - Improve get_workout output to use 'reps' field instead of 'duration_seconds' for rep-based steps - Improve create_workout documentation to make reps parameter prominent for AI agents The REPS end condition was incorrectly mapped to ID 8, but Garmin's API uses ID 10. 
This caused reps-based exercises to be parsed as 'lap.button' when reading workouts back. Weight values were also being double-converted because the code assumed kg but Garmin returns the actual unit. --- src/garmy/mcp/server.py | 142 ++++++++++++++++++++++--------- src/garmy/workouts/constants.py | 4 +- src/garmy/workouts/serializer.py | 16 +++- 3 files changed, 115 insertions(+), 47 deletions(-) diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 17e3c60..71d687b 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -11,7 +11,7 @@ import sqlite3 from datetime import date, timedelta from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union try: from fastmcp import FastMCP @@ -675,44 +675,65 @@ def get_workout(workout_id: int) -> Dict[str, Any]: from ..workouts.models import RepeatGroup if isinstance(step, RepeatGroup): + from ..workouts.constants import EndConditionType + + def format_nested_step(s): + """Format a nested step with appropriate field names.""" + result = { + "type": s.step_type.value, + "end_condition": s.end_condition.condition_type.value, + "target_type": s.target.target_type.value, + "exercise_name": s.exercise_name, + "exercise_category": s.exercise_category, + "weight_value": s.weight_value, + "weight_unit": s.weight_unit, + "description": s.description, + } + # Use appropriate field name based on end condition type + if s.end_condition.condition_type == EndConditionType.REPS: + result["reps"] = ( + int(s.end_condition.value) + if s.end_condition.value + else None + ) + else: + result["duration_seconds"] = s.end_condition.value + return result + steps_info.append( { "index": i + 1, "type": "repeat", "iterations": step.iterations, - "steps": [ - { - "type": s.step_type.value, - "duration_seconds": s.end_condition.value, - "end_condition": s.end_condition.condition_type.value, - "target_type": s.target.target_type.value, - "exercise_name": 
s.exercise_name, - "exercise_category": s.exercise_category, - "weight_value": s.weight_value, - "weight_unit": s.weight_unit, - "description": s.description, - } - for s in step.steps - ], + "steps": [format_nested_step(s) for s in step.steps], } ) else: - steps_info.append( - { - "index": i + 1, - "type": step.step_type.value, - "duration_seconds": step.end_condition.value, - "end_condition": step.end_condition.condition_type.value, - "target_type": step.target.target_type.value, - "target_low": step.target.value_low, - "target_high": step.target.value_high, - "exercise_name": step.exercise_name, - "exercise_category": step.exercise_category, - "weight_value": step.weight_value, - "weight_unit": step.weight_unit, - "description": step.description, - } - ) + from ..workouts.constants import EndConditionType + + step_info = { + "index": i + 1, + "type": step.step_type.value, + "end_condition": step.end_condition.condition_type.value, + "target_type": step.target.target_type.value, + "target_low": step.target.value_low, + "target_high": step.target.value_high, + "exercise_name": step.exercise_name, + "exercise_category": step.exercise_category, + "weight_value": step.weight_value, + "weight_unit": step.weight_unit, + "description": step.description, + } + # Use appropriate field name based on end condition type + if step.end_condition.condition_type == EndConditionType.REPS: + step_info["reps"] = ( + int(step.end_condition.value) + if step.end_condition.value + else None + ) + else: + step_info["duration_seconds"] = step.end_condition.value + steps_info.append(step_info) return { "success": True, @@ -732,7 +753,7 @@ def create_workout( name: str, sport_type: str = "cycling", description: Optional[str] = None, - steps_json: Optional[str] = None, + steps_json: Union[str, List[Dict[str, Any]], None] = None, ) -> Dict[str, Any]: """WHEN TO USE: When you need to create a new workout in Garmin Connect. 
@@ -744,14 +765,30 @@ def create_workout( name: Name for the workout sport_type: Sport type (cycling, running, swimming, strength_training, etc.) description: Optional description - steps_json: JSON string defining workout steps. Format: - [{"type": "warmup|interval|recovery|cooldown|rest", - "seconds": 60, "minutes": 10, "duration_seconds": 60, - "target_power": [88, 93], "description": "..."}] - For repeats: {"type": "repeat", "iterations": 3, "steps": [...]} - Duration can be specified as "minutes", "seconds", or "duration_seconds" - - Example steps_json: + steps_json: JSON string defining workout steps. + + Step format - each step can have these fields: + - type: "warmup", "interval", "recovery", "cooldown", "rest", or "repeat" + - Duration (pick ONE): "seconds", "minutes", "duration_seconds", OR "reps" + - reps: Number of repetitions (REQUIRED for strength exercises to show "Target: X Reps") + - exercise_name: Exercise name (auto-resolved, e.g., "bench press" -> "BARBELL_BENCH_PRESS") + - exercise_category: Optional category override + - weight_value: Weight amount for strength exercises + - weight_unit: "pound" or "kilogram" (default: pound) + - target_power: [low, high] percentage of FTP for cycling + - target_hr: [low, high] percentage of max HR + - target_cadence: [low, high] RPM + - description: Step description text + - lap_button: true to end step on lap button press (default if no duration/reps specified) + + For repeats: {"type": "repeat", "iterations": 3, "steps": [...]} + + IMPORTANT FOR STRENGTH TRAINING: + - You MUST include "reps" to set the rep count (e.g., "reps": 10) + - Without "reps", the step defaults to "lap button" end condition + - The "reps" value is what shows as "Target: X Reps" in Garmin Connect UI + + Example cycling workout: '[{"type": "warmup", "seconds": 300}, {"type": "repeat", "iterations": 3, "steps": [ {"type": "interval", "duration_seconds": 30, "target_power": [90, 95]}, @@ -759,6 +796,13 @@ def create_workout( ]}, {"type": 
"cooldown", "minutes": 5}]' + Example strength workout (note: reps is REQUIRED for rep-based exercises): + '[{"type": "repeat", "iterations": 3, "steps": [ + {"type": "interval", "reps": 10, + "exercise_name": "barbell bench press", "weight_value": 185, "weight_unit": "pound"}, + {"type": "rest", "seconds": 60} + ]}]' + Returns: Created workout details including the new workout_id """ @@ -782,7 +826,23 @@ def create_workout( # Parse and add steps if provided if steps_json: - steps = json.loads(steps_json) + if isinstance(steps_json, str): + try: + steps = json.loads(steps_json) + except json.JSONDecodeError as e: + return { + "success": False, + "error": f"Invalid steps_json format: {e}", + } + elif isinstance(steps_json, list): + # Handle case where environment already parsed the JSON + steps = steps_json + else: + return { + "success": False, + "error": f"steps_json must be a JSON string or list, got {type(steps_json).__name__}", + } + _add_steps_from_json(builder, steps) workout = builder.build() diff --git a/src/garmy/workouts/constants.py b/src/garmy/workouts/constants.py index c7d53b0..a017a3f 100644 --- a/src/garmy/workouts/constants.py +++ b/src/garmy/workouts/constants.py @@ -131,7 +131,7 @@ def condition_type_id(self) -> int: EndConditionType.POWER_LESS_THAN: 11, EndConditionType.POWER_GREATER_THAN: 12, EndConditionType.ITERATIONS: 7, - EndConditionType.REPS: 8, + EndConditionType.REPS: 10, # Garmin uses ID 10 for reps } return type_ids.get(self, 1) @@ -146,7 +146,7 @@ def from_condition_type_id(cls, type_id: int) -> "EndConditionType": 5: cls.HEART_RATE_LESS_THAN, 6: cls.HEART_RATE_GREATER_THAN, 7: cls.ITERATIONS, - 8: cls.REPS, + 10: cls.REPS, # Garmin uses ID 10 for reps 11: cls.POWER_LESS_THAN, 12: cls.POWER_GREATER_THAN, } diff --git a/src/garmy/workouts/serializer.py b/src/garmy/workouts/serializer.py index 1e59953..b2461b6 100644 --- a/src/garmy/workouts/serializer.py +++ b/src/garmy/workouts/serializer.py @@ -340,15 +340,23 @@ def _parse_step(cls, 
data: Dict[str, Any]) -> WorkoutStep: exercise_category = data.get("category") # Parse weight info - # Garmin stores weight in kilograms, convert to pounds for display + # Garmin returns weight with unit info - respect the unit from API weight_value_raw = data.get("weightValue") + weight_unit_data = data.get("weightUnit") or {} weight_value: Optional[float] = None weight_unit: Optional[str] = None if weight_value_raw is not None and weight_value_raw > 0: - # Convert from kilograms to pounds - weight_value = round(weight_value_raw * 2.20462, 1) - weight_unit = "pound" + # Check what unit Garmin returned + api_unit = weight_unit_data.get("unitKey", "kilogram") + if api_unit == "pound": + # Already in pounds, use as-is + weight_value = round(weight_value_raw, 1) + weight_unit = "pound" + else: + # Assume kilograms, convert to pounds for consistency + weight_value = round(weight_value_raw * 2.20462, 1) + weight_unit = "pound" elif weight_value_raw is not None and weight_value_raw < 0: # Clean up negative placeholder values from API weight_value = None From ec7e81e790a692d3aac689039b0ae2a69f62b18b Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Fri, 16 Jan 2026 18:42:05 -0800 Subject: [PATCH 16/26] Fix workout weight display on Garmin mobile app The Garmin mobile app has a bug where it doesn't convert kilogram values to the user's display preference. Previously, the serializer always converted weight to kilograms before sending to the API. While the web interface handled this correctly, the mobile app displayed the raw kg value with the wrong unit label (e.g., 83.9 lbs instead of 185 lbs). 
--- src/garmy/workouts/serializer.py | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/src/garmy/workouts/serializer.py b/src/garmy/workouts/serializer.py index b2461b6..8c3edd4 100644 --- a/src/garmy/workouts/serializer.py +++ b/src/garmy/workouts/serializer.py @@ -142,26 +142,24 @@ def _serialize_step(cls, step: WorkoutStep, order: int) -> Dict[str, Any]: result["exerciseName"] = step.exercise_name if step.weight_value is not None: - # Garmin stores weight in grams internally and uses kilogram as the standard unit - # We must convert to kilograms and send with kilogram unit + # Send weight in its original unit - the Garmin mobile app has a bug + # where it doesn't convert kg to the user's display preference, + # so we send in the user's specified unit (default: pound) weight_unit = step.weight_unit or "pound" - weight_in_kg: float + result["weightValue"] = step.weight_value if weight_unit == "kilogram": - weight_in_kg = step.weight_value - elif weight_unit == "pound": - # Convert pounds to kilograms - weight_in_kg = step.weight_value / 2.20462 + result["weightUnit"] = { + "unitId": 8, + "unitKey": "kilogram", + "factor": 1000.0, + } else: - # Assume kilograms - weight_in_kg = step.weight_value - - result["weightValue"] = round(weight_in_kg, 2) - # Always send as kilogram - Garmin will convert to user's display preference - result["weightUnit"] = { - "unitId": 8, - "unitKey": "kilogram", - "factor": 1000.0, - } + # Default to pound + result["weightUnit"] = { + "unitId": 9, + "unitKey": "pound", + "factor": 453.59237, + } return result From c48b442adfc5913ace6efd35663ac731e0d7487f Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Mon, 2 Mar 2026 17:58:46 -0800 Subject: [PATCH 17/26] Add --resync-days flag to force re-fetch of recently completed sync records --- src/garmy/localdb/cli.py | 9 +++++++++ src/garmy/localdb/db.py | 23 +++++++++++++++++++++++ src/garmy/localdb/sync.py | 26 ++++++++++++++++++++++++++ 
src/garmy/mcp/server.py | 11 +++++++++++ 4 files changed, 69 insertions(+) diff --git a/src/garmy/localdb/cli.py b/src/garmy/localdb/cli.py index 54f18ce..09d54d8 100644 --- a/src/garmy/localdb/cli.py +++ b/src/garmy/localdb/cli.py @@ -158,6 +158,7 @@ def cmd_sync(args) -> int: start_date=start_date, end_date=end_date, metrics=metrics, + resync_days=args.resync_days, ) # Print results @@ -494,6 +495,14 @@ def create_parser() -> argparse.ArgumentParser: type=str, help="Comma-separated list of metrics to sync (default: all)", ) + sync_parser.add_argument( + "--resync-days", + type=int, + default=0, + metavar="N", + help="Force re-sync of the last N days even if already completed. " + "Useful for updating partial data from earlier syncs (default: 0)", + ) sync_parser.add_argument( "--progress", choices=["tqdm", "simple", "silent"], diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index 1509ceb..d9c38bc 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -296,6 +296,29 @@ def get_sync_status( ) return sync_status.status if sync_status else None + def reset_completed_statuses( + self, user_id: int, start_date: date, end_date: date + ) -> int: + """Reset completed sync statuses to pending for a date range. + + Returns the number of records reset. 
+ """ + with self.get_session() as session: + count = ( + session.query(SyncStatus) + .filter( + and_( + SyncStatus.user_id == user_id, + SyncStatus.sync_date >= start_date, + SyncStatus.sync_date <= end_date, + SyncStatus.status == "completed", + ) + ) + .update({"status": "pending"}) + ) + session.commit() + return count + def get_pending_metrics(self, user_id: int, sync_date: date) -> List[str]: """Get list of pending metrics for date.""" with self.get_session() as session: diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index aaf9d31..0ef735a 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -94,6 +94,7 @@ def sync_range( start_date: date, end_date: date, metrics: Optional[List[MetricType]] = None, + resync_days: int = 0, ) -> Dict[str, int]: """Sync metrics for date range. @@ -102,6 +103,9 @@ def sync_range( start_date: Start of sync range end_date: End of sync range metrics: List of metrics to sync (default: all) + resync_days: Number of recent days to force re-sync even if + already completed. Useful for ensuring today's partial data + gets updated with final totals on subsequent runs. Returns: Dict with sync statistics @@ -116,6 +120,19 @@ def sync_range( f"Date range too large: {date_count} days. 
Maximum allowed: {self.config.sync.max_sync_days} days" ) + # Reset completed status for recent days so they get re-fetched + if resync_days > 0: + resync_cutoff = date.today() - timedelta(days=resync_days - 1) + resync_start = max(start_date, resync_cutoff) + reset_count = self._reset_completed_statuses( + user_id, resync_start, end_date + ) + if reset_count > 0: + self.progress.info( + f"Reset {reset_count} completed records for re-sync " + f"({resync_start} to {end_date})" + ) + if metrics is None: metrics = list(MetricType) @@ -769,6 +786,15 @@ def _store_health_metric( # Store all extracted data for these metrics self.db.store_health_metric(user_id, sync_date, **data) + def _reset_completed_statuses( + self, user_id: int, start_date: date, end_date: date + ) -> int: + """Reset completed sync statuses to pending for a date range. + + Returns the number of records reset. + """ + return self.db.reset_completed_statuses(user_id, start_date, end_date) + def _is_metric_completed( self, user_id: int, metric_type: MetricType, sync_date: date ) -> bool: diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 71d687b..1f7cae6 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -463,6 +463,7 @@ def sync_health_data( last_days: int = 7, metrics: Optional[str] = None, user_id: int = 1, + resync_days: int = 0, ) -> Dict[str, Any]: """WHEN TO USE: When you need to fetch fresh data from Garmin Connect. @@ -480,6 +481,8 @@ def sync_health_data( BODY_BATTERY, HRV, CALORIES, RESPIRATION, TRAINING_READINESS, ACTIVITIES, BODY_COMPOSITION user_id: User ID for database records (default: 1) + resync_days: Force re-sync of the last N days even if already completed. 
+ Useful for updating partial data from earlier syncs (default: 0, max: 7) Returns: Sync statistics including completed, skipped, and failed counts @@ -494,6 +497,13 @@ def sync_health_data( ) if user_id < 1: raise ValueError("user_id must be positive") + if resync_days < 0: + raise ValueError("resync_days must be non-negative") + if resync_days > 7: + raise ValueError( + "resync_days cannot exceed 7 for MCP sync. " + "For larger re-syncs, use 'garmy-sync sync' CLI directly." + ) # Parse metrics if provided sync_metrics: Optional[List[MetricType]] = None @@ -545,6 +555,7 @@ def sync_health_data( start_date=start_date, end_date=end_date, metrics=sync_metrics, + resync_days=resync_days, ) return { From 99998b06b11dd2c79cb95833a712654f8e40580a Mon Sep 17 00:00:00 2001 From: Zaheer Abbas Merali Date: Thu, 5 Mar 2026 10:04:11 +0000 Subject: [PATCH 18/26] Fix timeseries sync crash on NULL/NaN values SQLAlchemy autoflush was triggering NOT NULL constraint violations when merging timeseries entries with NaN float values. Wrap the batch loop in session.no_autoflush and add math.isnan() check alongside the existing None check to skip invalid entries before they reach the database. 
Co-Authored-By: Claude Opus 4.6 --- src/garmy/localdb/db.py | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index d9c38bc..5cd86fd 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -163,19 +163,24 @@ def store_timeseries_batch( self, user_id: int, metric_type: MetricType, data: List[tuple] ): """Store batch of timeseries data.""" + import math + with self.get_session() as session: - for timestamp, value, metadata in data: - # Skip entries with None values (NOT NULL constraint) - if value is None: - continue - timeseries = TimeSeries( - user_id=user_id, - metric_type=metric_type.value, - timestamp=timestamp, - value=value, - meta_data=metadata, - ) - session.merge(timeseries) + with session.no_autoflush: + for timestamp, value, metadata in data: + # Skip entries with None/NaN values (NOT NULL constraint) + if value is None or ( + isinstance(value, float) and math.isnan(value) + ): + continue + timeseries = TimeSeries( + user_id=user_id, + metric_type=metric_type.value, + timestamp=timestamp, + value=value, + meta_data=metadata, + ) + session.merge(timeseries) session.commit() def store_activity(self, user_id: int, activity_data: Dict[str, Any]): From eb11122703a087856933a7f0f8612eafc87ba974 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Sat, 7 Mar 2026 13:23:10 -0800 Subject: [PATCH 19/26] Add skip-last-rest support for repeat groups in workouts and MCP server --- src/garmy/mcp/server.py | 19 +++- src/garmy/workouts/builder.py | 20 +++- src/garmy/workouts/models.py | 2 + src/garmy/workouts/serializer.py | 5 + tests/test_workouts_serializer.py | 173 ++++++++++++++++++++++++++++++ 5 files changed, 211 insertions(+), 8 deletions(-) diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 1f7cae6..8d110bb 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -668,7 +668,9 @@ def get_workout(workout_id: int) -> Dict[str, 
Any]: workout_id: The Garmin workout ID to retrieve Returns: - Full workout details including steps and structure + Full workout details including steps and structure. + Repeat steps include a "skip_last_rest" boolean field indicating + whether the last iteration skips its rest/recovery step. """ if workout_id < 1: raise ValueError("workout_id must be positive") @@ -716,6 +718,7 @@ def format_nested_step(s): "index": i + 1, "type": "repeat", "iterations": step.iterations, + "skip_last_rest": step.smart_repeat, "steps": [format_nested_step(s) for s in step.steps], } ) @@ -792,12 +795,19 @@ def create_workout( - description: Step description text - lap_button: true to end step on lap button press (default if no duration/reps specified) - For repeats: {"type": "repeat", "iterations": 3, "steps": [...]} + For repeats: + - type: "repeat" + - iterations: Number of times to repeat (e.g., 3) + - skip_last_rest: When true, the last iteration skips its rest/recovery step. + This avoids double-resting when a transition rest follows the repeat block. + Recommended for strength training. 
(default: false) + - steps: Array of steps to repeat IMPORTANT FOR STRENGTH TRAINING: - You MUST include "reps" to set the rep count (e.g., "reps": 10) - Without "reps", the step defaults to "lap button" end condition - The "reps" value is what shows as "Target: X Reps" in Garmin Connect UI + - Use "skip_last_rest": true on repeat blocks to avoid double rest periods Example cycling workout: '[{"type": "warmup", "seconds": 300}, @@ -808,7 +818,7 @@ def create_workout( {"type": "cooldown", "minutes": 5}]' Example strength workout (note: reps is REQUIRED for rep-based exercises): - '[{"type": "repeat", "iterations": 3, "steps": [ + '[{"type": "repeat", "iterations": 3, "skip_last_rest": true, "steps": [ {"type": "interval", "reps": 10, "exercise_name": "barbell bench press", "weight_value": 185, "weight_unit": "pound"}, {"type": "rest", "seconds": 60} @@ -1041,8 +1051,9 @@ def _add_steps_from_json(builder: WorkoutBuilder, steps: list) -> None: if step_type == "repeat": iterations = step.get("iterations", 1) + skip_last_rest = step.get("skip_last_rest", False) repeat_steps = step.get("steps", []) - repeat_builder = builder.repeat(iterations) + repeat_builder = builder.repeat(iterations, smart_repeat=skip_last_rest) for rs in repeat_steps: rs_type = rs.get("type", "interval").lower() diff --git a/src/garmy/workouts/builder.py b/src/garmy/workouts/builder.py index 574f50d..e799f26 100644 --- a/src/garmy/workouts/builder.py +++ b/src/garmy/workouts/builder.py @@ -37,15 +37,19 @@ class RepeatBuilder: Created via WorkoutBuilder.repeat() and returns to parent via end_repeat(). """ - def __init__(self, parent: "WorkoutBuilder", iterations: int) -> None: + def __init__( + self, parent: "WorkoutBuilder", iterations: int, smart_repeat: bool = False + ) -> None: """Initialize repeat builder. Args: parent: The parent WorkoutBuilder to return to. iterations: Number of times to repeat the steps. + smart_repeat: When True, skip the last rest/recovery step in the final iteration. 
""" self._parent = parent self._iterations = iterations + self._smart_repeat = smart_repeat self._steps: List[WorkoutStep] = [] def _create_step( @@ -239,7 +243,11 @@ def step( def end_repeat(self) -> "WorkoutBuilder": """Finish the repeat group and return to the parent builder.""" - repeat_group = RepeatGroup(iterations=self._iterations, steps=self._steps) + repeat_group = RepeatGroup( + iterations=self._iterations, + steps=self._steps, + smart_repeat=self._smart_repeat, + ) self._parent._steps.append(repeat_group) return self._parent @@ -523,15 +531,19 @@ def step( self._steps.append(step) return self - def repeat(self, iterations: int) -> RepeatBuilder: + def repeat(self, iterations: int, smart_repeat: bool = False) -> RepeatBuilder: """Start a repeat group with the specified number of iterations. Use end_repeat() on the returned RepeatBuilder to return to this builder. + Args: + iterations: Number of times to repeat the steps. + smart_repeat: When True, skip the last rest/recovery step in the final iteration. 
+ Example: >>> builder.repeat(3).interval(minutes=5).recovery(minutes=2).end_repeat() """ - return RepeatBuilder(self, iterations) + return RepeatBuilder(self, iterations, smart_repeat=smart_repeat) def add_step(self, step: WorkoutStepOrRepeat) -> "WorkoutBuilder": """Add a pre-built step or repeat group.""" diff --git a/src/garmy/workouts/models.py b/src/garmy/workouts/models.py index c867683..f1d2fe4 100644 --- a/src/garmy/workouts/models.py +++ b/src/garmy/workouts/models.py @@ -209,11 +209,13 @@ class RepeatGroup: iterations: Number of times to repeat the steps steps: List of steps within the repeat group step_order: Order within parent workout (set during serialization) + smart_repeat: When True, skip the last rest/recovery step in the final iteration """ iterations: int = 1 steps: List[WorkoutStep] = field(default_factory=list) step_order: Optional[int] = None + smart_repeat: bool = False def add_step(self, step: WorkoutStep) -> "RepeatGroup": """Add a step to the repeat group.""" diff --git a/src/garmy/workouts/serializer.py b/src/garmy/workouts/serializer.py index 8c3edd4..9e80676 100644 --- a/src/garmy/workouts/serializer.py +++ b/src/garmy/workouts/serializer.py @@ -231,6 +231,8 @@ def _serialize_repeat_group(cls, repeat: RepeatGroup, order: int) -> Dict[str, A "conditionTypeKey": EndConditionType.ITERATIONS.value, }, "endConditionValue": float(repeat.iterations), + "smartRepeat": False, + "skipLastRestStep": repeat.smart_repeat, "workoutSteps": child_steps, } @@ -441,8 +443,11 @@ def _parse_repeat_group(cls, data: Dict[str, Any]) -> RepeatGroup: if s.get("type") != "RepeatGroupDTO" ] + smart_repeat = bool(data.get("skipLastRestStep", False)) + return RepeatGroup( iterations=iterations, steps=steps, step_order=data.get("stepOrder"), + smart_repeat=smart_repeat, ) diff --git a/tests/test_workouts_serializer.py b/tests/test_workouts_serializer.py index c8622cd..1cf75f9 100644 --- a/tests/test_workouts_serializer.py +++ b/tests/test_workouts_serializer.py @@ 
-336,6 +336,179 @@ def test_missing_fields(self): assert workout.workout_id is None +class TestSmartRepeatSerialization: + """Test cases for smart_repeat (skip last rest) feature.""" + + def test_serialize_smart_repeat_true(self): + """Test serialization with smart_repeat=True produces correct API fields.""" + workout = Workout(name="Test") + repeat = RepeatGroup(iterations=3, smart_repeat=True) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + ) + ) + repeat.add_step( + WorkoutStep( + step_type=StepType.REST, + end_condition=EndCondition.time_minutes(1), + ) + ) + workout.add_step(repeat) + + result = WorkoutSerializer.to_api_format(workout) + repeat_data = result["workoutSegments"][0]["workoutSteps"][0] + + assert repeat_data["skipLastRestStep"] is True + assert repeat_data["smartRepeat"] is False + + def test_serialize_smart_repeat_false_default(self): + """Test serialization with smart_repeat=False (default) produces correct API fields.""" + workout = Workout(name="Test") + repeat = RepeatGroup(iterations=3) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + ) + ) + workout.add_step(repeat) + + result = WorkoutSerializer.to_api_format(workout) + repeat_data = result["workoutSegments"][0]["workoutSteps"][0] + + assert repeat_data["skipLastRestStep"] is False + assert repeat_data["smartRepeat"] is False + + def test_parse_skip_last_rest_step_true(self): + """Test parsing API response with skipLastRestStep=true.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSteps": [ + { + "type": "RepeatGroupDTO", + "stepOrder": 1, + "numberOfIterations": 3, + "skipLastRestStep": True, + "smartRepeat": False, + "endCondition": { + "conditionTypeId": 7, + "conditionTypeKey": 
"iterations", + }, + "endConditionValue": 3.0, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepOrder": 1, + "stepType": { + "stepTypeId": 3, + "stepTypeKey": "interval", + }, + "intensityType": { + "intensityTypeId": 6, + "intensityTypeKey": "interval", + }, + "endCondition": { + "conditionTypeId": 2, + "conditionValue": 300, + }, + "targetType": {"targetTypeId": 1}, + }, + ], + } + ], + } + ], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + repeat = workout.steps[0] + assert isinstance(repeat, RepeatGroup) + assert repeat.smart_repeat is True + + def test_parse_skip_last_rest_step_missing(self): + """Test parsing API response without skipLastRestStep defaults to False.""" + api_data = { + "workoutName": "Test", + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSegments": [ + { + "segmentOrder": 1, + "sportType": {"sportTypeId": 2, "sportTypeKey": "cycling"}, + "workoutSteps": [ + { + "type": "RepeatGroupDTO", + "stepOrder": 1, + "numberOfIterations": 2, + "endCondition": { + "conditionTypeId": 7, + "conditionTypeKey": "iterations", + }, + "workoutSteps": [ + { + "type": "ExecutableStepDTO", + "stepOrder": 1, + "stepType": { + "stepTypeId": 3, + "stepTypeKey": "interval", + }, + "intensityType": { + "intensityTypeId": 6, + "intensityTypeKey": "interval", + }, + "endCondition": { + "conditionTypeId": 2, + "conditionValue": 300, + }, + "targetType": {"targetTypeId": 1}, + }, + ], + } + ], + } + ], + } + + workout = WorkoutSerializer.from_api_format(api_data) + + repeat = workout.steps[0] + assert isinstance(repeat, RepeatGroup) + assert repeat.smart_repeat is False + + def test_smart_repeat_round_trip(self): + """Test smart_repeat=True survives serialization round trip.""" + original = Workout(name="Smart Repeat Test") + repeat = RepeatGroup(iterations=3, smart_repeat=True) + repeat.add_step( + WorkoutStep( + step_type=StepType.INTERVAL, + end_condition=EndCondition.time_minutes(5), + ) + ) + repeat.add_step( + 
WorkoutStep( + step_type=StepType.REST, + end_condition=EndCondition.time_minutes(1), + ) + ) + original.add_step(repeat) + + api_format = WorkoutSerializer.to_api_format(original) + restored = WorkoutSerializer.from_api_format(api_format) + + restored_repeat = restored.steps[0] + assert isinstance(restored_repeat, RepeatGroup) + assert restored_repeat.smart_repeat is True + assert restored_repeat.iterations == 3 + + class TestWorkoutSerializerRoundTrip: """Test round-trip serialization/deserialization.""" From 32b28b4efe1f4e5e3c11ae31596459f942030c5d Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Sat, 14 Mar 2026 13:26:43 -0700 Subject: [PATCH 20/26] Fix MCP workout tools failing when access token expires between daemon sync runs --- src/garmy/mcp/server.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index 8d110bb..50299f5 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -605,10 +605,13 @@ def _get_authenticated_client() -> APIClient: """Get an authenticated API client using saved tokens.""" auth_client = AuthClient(token_dir=config.token_dir) if not auth_client.is_authenticated: - raise ValueError( - "Authentication required. Please run 'garmy-sync sync' from the " - "command line first to authenticate, then try again." - ) + if auth_client.needs_refresh: + auth_client.refresh_tokens() + else: + raise ValueError( + "Authentication required. Please run 'garmy-sync sync' from the " + "command line first to authenticate, then try again." + ) return APIClient(auth_client=auth_client) @mcp.tool() From bc3efa0aa76e664cfb1ff5b3df0c0120367fd200 Mon Sep 17 00:00:00 2001 From: Jaakko Tiistola Date: Tue, 24 Mar 2026 13:44:02 -0700 Subject: [PATCH 21/26] Add SpO2 metric and enable HRV timeseries/baseline storage Add SpO2 as a new metric with daily summary extraction and hourly timeseries data. 
Expand HRV with sleep-period timeseries readings and baseline range fields. Fix pre-existing HRV parser bug where lastNight5MinHigh was mapped to the wrong snake_case key. All changes verified with live Garmin API data. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/database-schema.md | 14 ++- src/garmy/localdb/db.py | 12 ++ src/garmy/localdb/extractors.py | 63 +++++++++- src/garmy/localdb/models.py | 6 + src/garmy/localdb/sync.py | 11 +- src/garmy/metrics/__init__.py | 2 + src/garmy/metrics/hrv.py | 2 +- src/garmy/metrics/spo2.py | 97 ++++++++++++++ tests/test_hrv_timeseries.py | 188 ++++++++++++++++++++++++++++ tests/test_metrics_comprehensive.py | 5 + tests/test_metrics_remaining.py | 5 + tests/test_spo2_metric.py | 182 +++++++++++++++++++++++++++ 12 files changed, 573 insertions(+), 14 deletions(-) create mode 100644 src/garmy/metrics/spo2.py create mode 100644 tests/test_hrv_timeseries.py create mode 100644 tests/test_spo2_metric.py diff --git a/docs/database-schema.md b/docs/database-schema.md index 05efeb3..b680a8b 100644 --- a/docs/database-schema.md +++ b/docs/database-schema.md @@ -109,8 +109,11 @@ light_sleep_percentage FLOAT -- % of sleep in light rem_sleep_percentage FLOAT -- % of sleep in REM awake_percentage FLOAT -- % of time awake --- Respiration and SpO2 +-- SpO2 average_spo2 FLOAT -- Average blood oxygen +lowest_spo2 FLOAT -- Lowest SpO2 reading + +-- Respiration average_respiration FLOAT -- Average respiration rate avg_waking_respiration_value FLOAT avg_sleep_respiration_value FLOAT @@ -124,6 +127,10 @@ training_readiness_feedback TEXT -- Readiness feedback hrv_weekly_avg FLOAT -- Weekly HRV average hrv_last_night_avg FLOAT -- Last night HRV hrv_status TEXT -- HRV status description +hrv_last_night_5min_high FLOAT -- Last night 5-min high HRV +hrv_baseline_low_upper FLOAT -- Baseline range: low upper boundary +hrv_baseline_balanced_low FLOAT -- Baseline range: balanced lower boundary +hrv_baseline_balanced_upper FLOAT -- Baseline range: 
balanced upper boundary -- Timestamps created_at DATETIME -- Record creation time @@ -146,9 +153,11 @@ meta_data JSON -- Additional metadata (optional) **Common Metric Types:** - `heart_rate` - Heart rate readings -- `stress` - Stress level measurements +- `stress` - Stress level measurements - `body_battery` - Body battery levels - `respiration` - Respiration rate readings +- `hrv` - Heart rate variability readings (during sleep) +- `spo2` - Blood oxygen saturation readings (overnight) ### `activities` **Purpose:** Individual workouts and physical activities @@ -518,6 +527,7 @@ Supported metric types in `sync_status` and `timeseries`: - `RESPIRATION` - `STEPS` - `CALORIES` +- `SPO2` ## 🔧 Performance Considerations diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index d9c38bc..f300a89 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -113,6 +113,13 @@ def _migrate_schema(self): ("sleep_wake_time", "VARCHAR"), ("sleep_need_minutes", "INTEGER"), ("skin_temp_deviation_c", "FLOAT"), + # SpO2 fields + ("lowest_spo2", "FLOAT"), + # HRV baseline fields + ("hrv_last_night_5min_high", "FLOAT"), + ("hrv_baseline_low_upper", "FLOAT"), + ("hrv_baseline_balanced_low", "FLOAT"), + ("hrv_baseline_balanced_upper", "FLOAT"), ] with self.engine.connect() as conn: @@ -479,6 +486,7 @@ def _metric_to_dict(self, metric: DailyHealthMetric) -> Dict[str, Any]: "rem_sleep_percentage": metric.rem_sleep_percentage, "awake_percentage": metric.awake_percentage, "average_spo2": metric.average_spo2, + "lowest_spo2": metric.lowest_spo2, "average_respiration": metric.average_respiration, "training_readiness_score": metric.training_readiness_score, "training_readiness_level": metric.training_readiness_level, @@ -486,6 +494,10 @@ def _metric_to_dict(self, metric: DailyHealthMetric) -> Dict[str, Any]: "hrv_weekly_avg": metric.hrv_weekly_avg, "hrv_last_night_avg": metric.hrv_last_night_avg, "hrv_status": metric.hrv_status, + "hrv_last_night_5min_high": 
metric.hrv_last_night_5min_high, + "hrv_baseline_low_upper": metric.hrv_baseline_low_upper, + "hrv_baseline_balanced_low": metric.hrv_baseline_balanced_low, + "hrv_baseline_balanced_upper": metric.hrv_baseline_balanced_upper, "avg_waking_respiration_value": metric.avg_waking_respiration_value, "avg_sleep_respiration_value": metric.avg_sleep_respiration_value, "lowest_respiration_value": metric.lowest_respiration_value, diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 21cac65..762be46 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -35,6 +35,8 @@ def extract_metric_data( return self._extract_stress_summary(data) elif metric_type == MetricType.BODY_BATTERY: return self._extract_body_battery_summary(data) + elif metric_type == MetricType.SPO2: + return self._extract_spo2_data(data) elif metric_type == MetricType.BODY_COMPOSITION: return self._extract_body_composition_data(data) else: @@ -204,11 +206,26 @@ def _extract_hrv_data(self, data: Any) -> Dict[str, Any]: """Extract HRV using nested summary.""" hrv_summary = getattr(data, "hrv_summary", None) if hrv_summary: - return { - "weekly_avg": getattr(hrv_summary, "weekly_avg", None), - "last_night_avg": getattr(hrv_summary, "last_night_avg", None), - "status": getattr(hrv_summary, "status", None), + result = { + "hrv_weekly_avg": getattr(hrv_summary, "weekly_avg", None), + "hrv_last_night_avg": getattr(hrv_summary, "last_night_avg", None), + "hrv_status": getattr(hrv_summary, "status", None), + "hrv_last_night_5min_high": getattr( + hrv_summary, "last_night_5_min_high", None + ), } + baseline = getattr(hrv_summary, "baseline", None) + if baseline: + result["hrv_baseline_low_upper"] = getattr( + baseline, "low_upper", None + ) + result["hrv_baseline_balanced_low"] = getattr( + baseline, "balanced_low", None + ) + result["hrv_baseline_balanced_upper"] = getattr( + baseline, "balanced_upper", None + ) + return result return {} def 
_extract_respiration_summary(self, data: Any) -> Dict[str, Any]: @@ -255,6 +272,13 @@ def _extract_respiration_summary(self, data: Any) -> Dict[str, Any]: return {} + def _extract_spo2_data(self, data: Any) -> Dict[str, Any]: + """Extract SpO2 daily summary data.""" + return { + "average_spo2": getattr(data, "average_spo2", None), + "lowest_spo2": getattr(data, "lowest_spo2", None), + } + def _extract_activity_data(self, data: Any) -> Dict[str, Any]: """Extract activity data from both parsed and raw formats. @@ -367,6 +391,37 @@ def extract_timeseries_data( for reading in data.respiration_readings: timeseries_data.append((reading.timestamp, reading.value, {})) + elif metric_type == MetricType.HRV: + if hasattr(data, "hrv_readings") and data.hrv_readings: + from datetime import datetime + + for reading in data.hrv_readings: + if reading.hrv_value is None: + continue + # Convert ISO timestamp string to unix ms + if reading.reading_time_gmt: + try: + dt = datetime.fromisoformat( + reading.reading_time_gmt.replace("Z", "+00:00") + ) + timestamp_ms = int(dt.timestamp() * 1000) + timeseries_data.append( + (timestamp_ms, reading.hrv_value, {}) + ) + except (ValueError, OSError): + continue + + elif metric_type == MetricType.SPO2: + if ( + hasattr(data, "spo2_hourly_averages") + and data.spo2_hourly_averages + ): + for reading in data.spo2_hourly_averages: + if isinstance(reading, (list, tuple)) and len(reading) >= 2: + timestamp, spo2_value = reading[0], reading[1] + if spo2_value is not None: + timeseries_data.append((timestamp, spo2_value, {})) + return timeseries_data def _extract_steps_data(self, data: Any) -> Dict[str, Any]: diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index f04a568..f926ebb 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -34,6 +34,7 @@ class MetricType(Enum): STEPS = "steps" CALORIES = "calories" BODY_COMPOSITION = "body_composition" + SPO2 = "spo2" class TimeSeries(Base): @@ -195,6 +196,7 @@ 
class DailyHealthMetric(Base): awake_percentage = Column(Float) average_spo2 = Column(Float) + lowest_spo2 = Column(Float) average_respiration = Column(Float) training_readiness_score = Column(Integer) @@ -204,6 +206,10 @@ class DailyHealthMetric(Base): hrv_weekly_avg = Column(Float) hrv_last_night_avg = Column(Float) hrv_status = Column(Text) + hrv_last_night_5min_high = Column(Float) + hrv_baseline_low_upper = Column(Float) + hrv_baseline_balanced_low = Column(Float) + hrv_baseline_balanced_upper = Column(Float) avg_waking_respiration_value = Column(Float) avg_sleep_respiration_value = Column(Float) diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index 0ef735a..03efabb 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -251,6 +251,8 @@ def _sync_metric_for_date( MetricType.STRESS, MetricType.HEART_RATE, MetricType.RESPIRATION, + MetricType.HRV, + MetricType.SPO2, ]: timeseries_data = self.extractor.extract_timeseries_data( data, metric_type @@ -768,13 +770,7 @@ def _store_health_metric( training_readiness_feedback=data.get("feedback"), ) elif metric_type == MetricType.HRV: - self.db.store_health_metric( - user_id, - sync_date, - hrv_weekly_avg=data.get("weekly_avg"), - hrv_last_night_avg=data.get("last_night_avg"), - hrv_status=data.get("status"), - ) + self.db.store_health_metric(user_id, sync_date, **data) elif metric_type in [ MetricType.RESPIRATION, MetricType.HEART_RATE, @@ -782,6 +778,7 @@ def _store_health_metric( MetricType.BODY_BATTERY, MetricType.STEPS, MetricType.CALORIES, + MetricType.SPO2, ]: # Store all extracted data for these metrics self.db.store_health_metric(user_id, sync_date, **data) diff --git a/src/garmy/metrics/__init__.py b/src/garmy/metrics/__init__.py index 14a0121..58f8d0b 100644 --- a/src/garmy/metrics/__init__.py +++ b/src/garmy/metrics/__init__.py @@ -87,6 +87,7 @@ from .hrv import HRV from .respiration import Respiration from .sleep import Sleep +from .spo2 import SpO2 from .steps import 
Steps from .stress import Stress from .training_readiness import TrainingReadiness @@ -100,6 +101,7 @@ "HeartRate", "Respiration", "Sleep", + "SpO2", "Steps", "Stress", "TrainingReadiness", diff --git a/src/garmy/metrics/hrv.py b/src/garmy/metrics/hrv.py index 0a92073..9f73401 100644 --- a/src/garmy/metrics/hrv.py +++ b/src/garmy/metrics/hrv.py @@ -89,7 +89,7 @@ def parse_hrv_data(data: Dict[str, Any]) -> "HRV": calendar_date=hrv_summary_data.get("calendar_date", ""), weekly_avg=hrv_summary_data.get("weekly_avg", 0), last_night_avg=hrv_summary_data.get("last_night_avg", 0), - last_night_5_min_high=hrv_summary_data.get("last_night_5_min_high", 0), + last_night_5_min_high=hrv_summary_data.get("last_night5_min_high", 0), baseline=baseline, status=hrv_summary_data.get("status", ""), feedback_phrase=hrv_summary_data.get("feedback_phrase", ""), diff --git a/src/garmy/metrics/spo2.py b/src/garmy/metrics/spo2.py new file mode 100644 index 0000000..9f74b21 --- /dev/null +++ b/src/garmy/metrics/spo2.py @@ -0,0 +1,97 @@ +"""SpO2 (Blood Oxygen Saturation) metric module. + +This module provides access to Garmin SpO2 data using the auto-discovery +architecture. Includes daily summary (average, min, max) and hourly average +readings for timeseries storage. + +Data Source: + Garmin Connect API endpoint: /wellness-service/wellness/daily/spo2/{date} +""" + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from ..core.base import MetricConfig +from ..core.utils import camel_to_snake_dict + + +@dataclass +class SpO2: + """Daily SpO2 data from Garmin Connect API. + + Raw SpO2 data including daily averages and hourly readings throughout + the day. All data comes directly from Garmin's wellness service. 
+ + Attributes: + calendar_date: Date string (YYYY-MM-DD) + average_spo2: Daily average SpO2 + lowest_spo2: Lowest SpO2 reading + latest_spo2: Most recent SpO2 reading + avg_sleep_spo2: Average SpO2 during sleep + last_seven_days_avg_spo2: Rolling 7-day average + spo2_hourly_averages: Hourly average readings as [timestamp_ms, value] pairs + + Example: + >>> spo2 = garmy.spo2.get() + >>> print(f"Average SpO2: {spo2.average_spo2}%") + >>> print(f"Lowest: {spo2.lowest_spo2}%") + >>> print(f"Hourly readings: {len(spo2.spo2_hourly_averages)}") + """ + + calendar_date: str = "" + average_spo2: Optional[float] = None + lowest_spo2: Optional[int] = None + latest_spo2: Optional[int] = None + avg_sleep_spo2: Optional[float] = None + last_seven_days_avg_spo2: Optional[float] = None + spo2_hourly_averages: List[List[Any]] = field(default_factory=list) + + @property + def readings_count(self) -> int: + """Get number of hourly average readings.""" + return len(self.spo2_hourly_averages) + + @property + def valid_readings_count(self) -> int: + """Get number of valid hourly readings (excluding None values).""" + return len( + [ + reading + for reading in self.spo2_hourly_averages + if len(reading) >= 2 and reading[1] is not None + ] + ) + + +def parse_spo2_data(data: Dict[str, Any]) -> SpO2: + """Parse SpO2 API response into structured data.""" + snake_dict = camel_to_snake_dict(data) + + if not isinstance(snake_dict, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(snake_dict).__name__}. 
" + f"Raw data: {data}" + ) + + return SpO2( + calendar_date=snake_dict.get("calendar_date", ""), + average_spo2=snake_dict.get("average_sp_o2"), + lowest_spo2=snake_dict.get("lowest_sp_o2"), + latest_spo2=snake_dict.get("latest_sp_o2"), + avg_sleep_spo2=snake_dict.get("avg_sleep_sp_o2"), + last_seven_days_avg_spo2=snake_dict.get("last_seven_days_avg_sp_o2"), + spo2_hourly_averages=snake_dict.get("sp_o2_hourly_averages") or [], + ) + + +# Declarative configuration for auto-discovery with custom parser +METRIC_CONFIG = MetricConfig( + endpoint="/wellness-service/wellness/daily/spo2/{date}", + metric_class=SpO2, + parser=parse_spo2_data, + description="Daily blood oxygen saturation with hourly average readings", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/tests/test_hrv_timeseries.py b/tests/test_hrv_timeseries.py new file mode 100644 index 0000000..c989333 --- /dev/null +++ b/tests/test_hrv_timeseries.py @@ -0,0 +1,188 @@ +"""Tests for HRV timeseries extraction and expanded daily summary fields.""" + +from garmy.localdb.extractors import DataExtractor +from garmy.localdb.models import MetricType +from garmy.metrics.hrv import HRV, HRVBaseline, HRVReading, HRVSummary + + +class TestHRVTimeseriesExtraction: + """Test HRV timeseries data extraction for localdb storage.""" + + def create_sample_hrv_data(self, **overrides): + """Create a realistic HRV data object for testing.""" + baseline = HRVBaseline( + low_upper=45, + balanced_low=50, + balanced_upper=70, + marker_value=55.0, + ) + summary = HRVSummary( + calendar_date="2026-03-24", + weekly_avg=58, + last_night_avg=62, + last_night_5_min_high=85, + baseline=baseline, + status="BALANCED", + feedback_phrase="Your HRV is balanced", + create_time_stamp="2026-03-24T06:00:00.0", + ) + readings = [ + HRVReading( + hrv_value=55, + reading_time_gmt="2026-03-24T01:00:00.0", + reading_time_local="2026-03-23T20:00:00.0", + ), + HRVReading( + hrv_value=62, + 
reading_time_gmt="2026-03-24T01:05:00.0", + reading_time_local="2026-03-23T20:05:00.0", + ), + HRVReading( + hrv_value=70, + reading_time_gmt="2026-03-24T01:10:00.0", + reading_time_local="2026-03-23T20:10:00.0", + ), + ] + + kwargs = dict( + user_profile_pk=12345, + hrv_summary=summary, + hrv_readings=readings, + ) + kwargs.update(overrides) + return HRV(**kwargs) + + def test_extract_hrv_timeseries(self): + """Test timeseries extraction converts ISO timestamps to unix ms.""" + hrv = self.create_sample_hrv_data() + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(hrv, MetricType.HRV) + + assert len(timeseries) == 3 + + # Verify first reading + ts, value, meta = timeseries[0] + assert value == 55 + assert meta == {} + # Timestamp should be a positive integer (unix ms) + assert isinstance(ts, int) + assert ts > 0 + + def test_extract_hrv_timeseries_skips_none_values(self): + """Test that readings with None hrv_value are skipped.""" + hrv = self.create_sample_hrv_data( + hrv_readings=[ + HRVReading( + hrv_value=0, # zero is valid + reading_time_gmt="2026-03-24T01:00:00.0", + reading_time_local="2026-03-23T20:00:00.0", + ), + ] + ) + + # Manually set one reading's hrv_value to None + reading_with_none = HRVReading( + hrv_value=55, + reading_time_gmt="2026-03-24T01:05:00.0", + reading_time_local="", + ) + object.__setattr__(reading_with_none, "hrv_value", None) + + hrv.hrv_readings.append(reading_with_none) + + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(hrv, MetricType.HRV) + + values = [t[1] for t in timeseries] + assert None not in values + + def test_extract_hrv_timeseries_empty_readings(self): + """Test timeseries extraction with no readings.""" + hrv = self.create_sample_hrv_data(hrv_readings=[]) + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(hrv, MetricType.HRV) + + assert timeseries == [] + + def test_extract_hrv_timeseries_handles_z_suffix(self): + """Test ISO 
timestamp with Z suffix is handled correctly.""" + hrv = self.create_sample_hrv_data( + hrv_readings=[ + HRVReading( + hrv_value=60, + reading_time_gmt="2026-03-24T01:00:00Z", + reading_time_local="2026-03-23T20:00:00Z", + ), + ] + ) + + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(hrv, MetricType.HRV) + + assert len(timeseries) == 1 + assert timeseries[0][1] == 60 + + +class TestHRVExpandedDailySummary: + """Test HRV expanded daily summary extraction with baseline fields.""" + + def create_sample_hrv_data(self, **overrides): + """Create a realistic HRV data object for testing.""" + baseline = HRVBaseline( + low_upper=45, + balanced_low=50, + balanced_upper=70, + marker_value=55.0, + ) + summary = HRVSummary( + calendar_date="2026-03-24", + weekly_avg=58, + last_night_avg=62, + last_night_5_min_high=85, + baseline=baseline, + status="BALANCED", + feedback_phrase="Your HRV is balanced", + create_time_stamp="2026-03-24T06:00:00.0", + ) + kwargs = dict( + user_profile_pk=12345, + hrv_summary=summary, + hrv_readings=[], + ) + kwargs.update(overrides) + return HRV(**kwargs) + + def test_extract_hrv_summary_with_baseline(self): + """Test extraction includes baseline fields.""" + hrv = self.create_sample_hrv_data() + extractor = DataExtractor() + result = extractor.extract_metric_data(hrv, MetricType.HRV) + + assert result["hrv_weekly_avg"] == 58 + assert result["hrv_last_night_avg"] == 62 + assert result["hrv_status"] == "BALANCED" + assert result["hrv_last_night_5min_high"] == 85 + assert result["hrv_baseline_low_upper"] == 45 + assert result["hrv_baseline_balanced_low"] == 50 + assert result["hrv_baseline_balanced_upper"] == 70 + + def test_extract_hrv_summary_no_summary(self): + """Test extraction returns empty dict when no summary.""" + hrv = self.create_sample_hrv_data() + object.__setattr__(hrv, "hrv_summary", None) + + extractor = DataExtractor() + result = extractor.extract_metric_data(hrv, MetricType.HRV) + + assert result == {} + 
+ def test_extract_hrv_summary_no_baseline(self): + """Test extraction when baseline is None.""" + hrv = self.create_sample_hrv_data() + object.__setattr__(hrv.hrv_summary, "baseline", None) + + extractor = DataExtractor() + result = extractor.extract_metric_data(hrv, MetricType.HRV) + + assert result["hrv_weekly_avg"] == 58 + assert "hrv_baseline_low_upper" not in result diff --git a/tests/test_metrics_comprehensive.py b/tests/test_metrics_comprehensive.py index d09dacf..931e800 100644 --- a/tests/test_metrics_comprehensive.py +++ b/tests/test_metrics_comprehensive.py @@ -42,6 +42,7 @@ def test_package_imports(self): HeartRate, Respiration, Sleep, + SpO2, Steps, Stress, TrainingReadiness, @@ -57,6 +58,7 @@ def test_package_imports(self): HeartRate, Respiration, Sleep, + SpO2, Steps, Stress, TrainingReadiness, @@ -78,6 +80,7 @@ def test_all_exports(self): "HeartRate", "Respiration", "Sleep", + "SpO2", "Steps", "Stress", "TrainingReadiness", @@ -1365,6 +1368,7 @@ def test_all_metrics_have_metric_configs(self): "calories", "daily_summary", "respiration", + "spo2", "steps", "stress", ] @@ -1487,6 +1491,7 @@ def test_metric_configs_have_required_fields(self): "calories", "daily_summary", "respiration", + "spo2", "steps", "stress", ] diff --git a/tests/test_metrics_remaining.py b/tests/test_metrics_remaining.py index 78aed39..bbd0f68 100644 --- a/tests/test_metrics_remaining.py +++ b/tests/test_metrics_remaining.py @@ -467,6 +467,7 @@ def test_all_metrics_modules_importable(self): "calories", "daily_summary", "respiration", + "spo2", "steps", "stress", ] @@ -489,6 +490,7 @@ def test_all_exported_classes_are_dataclasses(self): HeartRate, Respiration, Sleep, + SpO2, Steps, Stress, TrainingReadiness, @@ -503,6 +505,7 @@ def test_all_exported_classes_are_dataclasses(self): HeartRate, Respiration, Sleep, + SpO2, Steps, Stress, TrainingReadiness, @@ -525,6 +528,7 @@ def test_metric_configs_consistency(self): "calories", "daily_summary", "respiration", + "spo2", "steps", 
"stress", ] @@ -571,6 +575,7 @@ def test_endpoint_or_builder_present(self): "calories", "daily_summary", "respiration", + "spo2", "steps", "stress", ] diff --git a/tests/test_spo2_metric.py b/tests/test_spo2_metric.py new file mode 100644 index 0000000..f709ad9 --- /dev/null +++ b/tests/test_spo2_metric.py @@ -0,0 +1,182 @@ +"""Tests for SpO2 metric module: dataclass, parser, and timeseries extraction.""" + +import pytest + +from garmy.core.base import MetricConfig +from garmy.localdb.extractors import DataExtractor +from garmy.localdb.models import MetricType +from garmy.metrics.spo2 import SpO2, parse_spo2_data + + +class TestSpO2Parsing: + """Test SpO2 API response parsing.""" + + def create_sample_spo2_response(self, **overrides): + """Create a realistic SpO2 API response matching actual Garmin API.""" + response = { + "calendarDate": "2026-03-24", + "averageSpO2": 97.0, + "lowestSpO2": 91, + "latestSpO2": 100, + "avgSleepSpO2": 98.0, + "lastSevenDaysAvgSpO2": 96.14, + "spO2HourlyAverages": [ + [1711238400000, 99], + [1711242000000, 97], + [1711245600000, 96], + ], + "continuousReadingDTOList": None, + } + response.update(overrides) + return response + + def test_parse_full_response(self): + """Test parsing a complete SpO2 API response.""" + data = self.create_sample_spo2_response() + result = parse_spo2_data(data) + + assert isinstance(result, SpO2) + assert result.calendar_date == "2026-03-24" + assert result.average_spo2 == 97.0 + assert result.lowest_spo2 == 91 + assert result.latest_spo2 == 100 + assert result.avg_sleep_spo2 == 98.0 + assert result.last_seven_days_avg_spo2 == 96.14 + assert len(result.spo2_hourly_averages) == 3 + + def test_parse_hourly_averages(self): + """Test hourly average readings are parsed correctly.""" + data = self.create_sample_spo2_response() + result = parse_spo2_data(data) + + reading = result.spo2_hourly_averages[0] + assert reading[0] == 1711238400000 + assert reading[1] == 99 + + def test_parse_empty_response(self): + 
"""Test parsing an empty/minimal response.""" + data = {"calendarDate": "2026-03-24"} + result = parse_spo2_data(data) + + assert result.calendar_date == "2026-03-24" + assert result.average_spo2 is None + assert result.lowest_spo2 is None + assert result.latest_spo2 is None + assert result.spo2_hourly_averages == [] + + def test_readings_count_property(self): + """Test readings_count property.""" + data = self.create_sample_spo2_response() + result = parse_spo2_data(data) + assert result.readings_count == 3 + + def test_valid_readings_count_property(self): + """Test valid_readings_count excludes None values.""" + spo2 = SpO2( + spo2_hourly_averages=[ + [1711238400000, 99], + [1711242000000, None], + [1711245600000, 96], + ] + ) + assert spo2.valid_readings_count == 2 + + def test_parse_invalid_data_raises(self): + """Test that non-dict input raises ValueError.""" + with pytest.raises(ValueError, match="Expected dictionary"): + parse_spo2_data("not a dict") + + +class TestSpO2MetricConfig: + """Test SpO2 metric configuration for auto-discovery.""" + + def test_metric_config_exists(self): + """Test that __metric_config__ is exported.""" + from garmy.metrics.spo2 import __metric_config__ + + assert __metric_config__ is not None + assert __metric_config__.endpoint == "/wellness-service/wellness/daily/spo2/{date}" + assert __metric_config__.metric_class is SpO2 + assert __metric_config__.parser is parse_spo2_data + assert isinstance(__metric_config__, MetricConfig) + + def test_spo2_importable_from_package(self): + """Test SpO2 is importable from garmy.metrics.""" + from garmy.metrics import SpO2 as SpO2Import + + assert SpO2Import is SpO2 + + +class TestSpO2TimeseriesExtraction: + """Test SpO2 timeseries data extraction for localdb storage.""" + + def test_extract_spo2_timeseries(self): + """Test timeseries extraction from SpO2 hourly averages.""" + spo2 = SpO2( + calendar_date="2026-03-24", + average_spo2=97.0, + spo2_hourly_averages=[ + [1711238400000, 99], + 
[1711242000000, 97], + [1711245600000, 96], + ], + ) + + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(spo2, MetricType.SPO2) + + assert len(timeseries) == 3 + ts, value, meta = timeseries[0] + assert ts == 1711238400000 + assert value == 99 + assert meta == {} + + def test_extract_spo2_timeseries_skips_none_values(self): + """Test that readings with None value are skipped.""" + spo2 = SpO2( + spo2_hourly_averages=[ + [1711238400000, None], + [1711242000000, 97], + ], + ) + + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(spo2, MetricType.SPO2) + + assert len(timeseries) == 1 + assert timeseries[0][1] == 97 + + def test_extract_spo2_timeseries_empty_readings(self): + """Test timeseries extraction with no readings.""" + spo2 = SpO2(spo2_hourly_averages=[]) + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(spo2, MetricType.SPO2) + + assert timeseries == [] + + +class TestSpO2DailySummaryExtraction: + """Test SpO2 daily summary extraction for daily_health_metrics.""" + + def test_extract_spo2_summary(self): + """Test daily summary extraction from SpO2 data.""" + spo2 = SpO2( + calendar_date="2026-03-24", + average_spo2=97.0, + lowest_spo2=91, + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(spo2, MetricType.SPO2) + + assert result["average_spo2"] == 97.0 + assert result["lowest_spo2"] == 91 + + def test_extract_spo2_summary_none_values(self): + """Test extraction when fields are None.""" + spo2 = SpO2() + extractor = DataExtractor() + result = extractor.extract_metric_data(spo2, MetricType.SPO2) + + assert result["average_spo2"] is None + assert result["lowest_spo2"] is None From 40bd57a2ddcfe6ade063604a98de49abfbadfb6d Mon Sep 17 00:00:00 2001 From: Jaakko Tiistola Date: Tue, 24 Mar 2026 17:47:43 -0700 Subject: [PATCH 22/26] Add resting HR, intensity minutes, and floors daily aggregate metrics Co-Authored-By: Claude Opus 4.6 (1M context) --- 
docs/database-schema.md | 23 +- src/garmy/core/endpoint_builders.py | 23 ++ src/garmy/localdb/db.py | 20 + src/garmy/localdb/extractors.py | 45 +++ src/garmy/localdb/models.py | 19 + src/garmy/localdb/sync.py | 4 + src/garmy/metrics/__init__.py | 6 + src/garmy/metrics/floors.py | 114 ++++++ src/garmy/metrics/intensity_minutes.py | 117 ++++++ src/garmy/metrics/resting_heart_rate.py | 92 +++++ tests/test_daily_aggregate_metrics.py | 476 ++++++++++++++++++++++++ tests/test_metrics_comprehensive.py | 9 + 12 files changed, 946 insertions(+), 2 deletions(-) create mode 100644 src/garmy/metrics/floors.py create mode 100644 src/garmy/metrics/intensity_minutes.py create mode 100644 src/garmy/metrics/resting_heart_rate.py create mode 100644 tests/test_daily_aggregate_metrics.py diff --git a/docs/database-schema.md b/docs/database-schema.md index b680a8b..59c7c39 100644 --- a/docs/database-schema.md +++ b/docs/database-schema.md @@ -23,7 +23,9 @@ daily_health_metrics (Primary health data) ├── Stress: avg_stress_level, max_stress_level ├── Body Battery: body_battery_high, body_battery_low ├── Training: training_readiness_score, training_readiness_level -└── HRV: hrv_weekly_avg, hrv_last_night_avg, hrv_status +├── HRV: hrv_weekly_avg, hrv_last_night_avg, hrv_status +├── Intensity: moderate_intensity_minutes, vigorous_intensity_minutes +└── Floors: floors_ascended, floors_descended timeseries (High-frequency data) ├── user_id, metric_type, timestamp (PK) @@ -120,6 +122,19 @@ avg_sleep_respiration_value FLOAT lowest_respiration_value FLOAT highest_respiration_value FLOAT +-- Intensity Minutes +moderate_intensity_minutes INTEGER -- Weekly cumulative moderate minutes (from API) +vigorous_intensity_minutes INTEGER -- Weekly cumulative vigorous minutes (from API) +intensity_minutes_total INTEGER -- Daily earned total (sum of 15-min timeseries) +intensity_minutes_goal INTEGER -- Weekly goal (typically 150) + +-- Floors +floors_ascended INTEGER -- Floors climbed during the day 
+floors_descended INTEGER -- Floors descended during the day + +-- Dedicated Resting HR +dedicated_resting_heart_rate INTEGER -- From dedicated userstats endpoint + -- Training and HRV training_readiness_score INTEGER -- Training readiness (0-100) training_readiness_level TEXT -- Readiness level description @@ -158,6 +173,7 @@ meta_data JSON -- Additional metadata (optional) - `respiration` - Respiration rate readings - `hrv` - Heart rate variability readings (during sleep) - `spo2` - Blood oxygen saturation readings (overnight) +- `intensity_minutes` - Intensity minutes earned (15-min intervals) ### `activities` **Purpose:** Individual workouts and physical activities @@ -518,7 +534,7 @@ All tables use `user_id` as the primary identifier, allowing multi-user support. Supported metric types in `sync_status` and `timeseries`: - `DAILY_SUMMARY` - `SLEEP` -- `ACTIVITIES` +- `ACTIVITIES` - `BODY_BATTERY` - `STRESS` - `HEART_RATE` @@ -528,6 +544,9 @@ Supported metric types in `sync_status` and `timeseries`: - `STEPS` - `CALORIES` - `SPO2` +- `RESTING_HEART_RATE` +- `INTENSITY_MINUTES` +- `FLOORS` ## 🔧 Performance Considerations diff --git a/src/garmy/core/endpoint_builders.py b/src/garmy/core/endpoint_builders.py index 3dd6e5c..caf423b 100644 --- a/src/garmy/core/endpoint_builders.py +++ b/src/garmy/core/endpoint_builders.py @@ -225,3 +225,26 @@ def build_daily_summary_endpoint( """Build daily summary endpoint URL.""" builder = UserSummaryEndpointBuilder("daily summary", "") return builder.build(date_input, api_client, **kwargs) + + +class UserStatsEndpointBuilder(BaseEndpointBuilder): + """Builder for user stats service endpoints (e.g. 
resting heart rate).""" + + def get_endpoint_name(self) -> str: + """Get the name of this endpoint.""" + return "resting heart rate" + + def build_endpoint_url(self, user_id: str, date_str: str, **kwargs: Any) -> str: + """Build user stats service URL.""" + return ( + f"/userstats-service/wellness/daily/{user_id}" + f"?fromDate={date_str}&untilDate={date_str}" + ) + + +def build_resting_heart_rate_endpoint( + date_input: Union["date", str, None] = None, api_client: Any = None, **kwargs: Any +) -> str: + """Build resting heart rate endpoint URL.""" + builder = UserStatsEndpointBuilder() + return builder.build(date_input, api_client, **kwargs) diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index f300a89..d4f78c8 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -115,6 +115,16 @@ def _migrate_schema(self): ("skin_temp_deviation_c", "FLOAT"), # SpO2 fields ("lowest_spo2", "FLOAT"), + # Intensity minutes + ("moderate_intensity_minutes", "INTEGER"), + ("vigorous_intensity_minutes", "INTEGER"), + ("intensity_minutes_total", "INTEGER"), + ("intensity_minutes_goal", "INTEGER"), + # Floors + ("floors_ascended", "INTEGER"), + ("floors_descended", "INTEGER"), + # Dedicated resting HR + ("dedicated_resting_heart_rate", "INTEGER"), # HRV baseline fields ("hrv_last_night_5min_high", "FLOAT"), ("hrv_baseline_low_upper", "FLOAT"), @@ -502,6 +512,16 @@ def _metric_to_dict(self, metric: DailyHealthMetric) -> Dict[str, Any]: "avg_sleep_respiration_value": metric.avg_sleep_respiration_value, "lowest_respiration_value": metric.lowest_respiration_value, "highest_respiration_value": metric.highest_respiration_value, + # Intensity minutes + "moderate_intensity_minutes": metric.moderate_intensity_minutes, + "vigorous_intensity_minutes": metric.vigorous_intensity_minutes, + "intensity_minutes_total": metric.intensity_minutes_total, + "intensity_minutes_goal": metric.intensity_minutes_goal, + # Floors + "floors_ascended": metric.floors_ascended, + 
"floors_descended": metric.floors_descended, + # Dedicated resting HR + "dedicated_resting_heart_rate": metric.dedicated_resting_heart_rate, # Sleep enhancements "sleep_score": metric.sleep_score, "sleep_score_qualifier": metric.sleep_score_qualifier, diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 762be46..27bd29f 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -37,6 +37,12 @@ def extract_metric_data( return self._extract_body_battery_summary(data) elif metric_type == MetricType.SPO2: return self._extract_spo2_data(data) + elif metric_type == MetricType.RESTING_HEART_RATE: + return self._extract_resting_heart_rate_data(data) + elif metric_type == MetricType.INTENSITY_MINUTES: + return self._extract_intensity_minutes_data(data) + elif metric_type == MetricType.FLOORS: + return self._extract_floors_data(data) elif metric_type == MetricType.BODY_COMPOSITION: return self._extract_body_composition_data(data) else: @@ -279,6 +285,35 @@ def _extract_spo2_data(self, data: Any) -> Dict[str, Any]: "lowest_spo2": getattr(data, "lowest_spo2", None), } + def _extract_resting_heart_rate_data(self, data: Any) -> Dict[str, Any]: + """Extract dedicated resting heart rate data.""" + return { + "dedicated_resting_heart_rate": getattr(data, "value", None), + } + + def _extract_intensity_minutes_data(self, data: Any) -> Dict[str, Any]: + """Extract intensity minutes daily summary. 
+ + - moderate/vigorous: weekly cumulative values from the API + - intensity_minutes_total: daily earned total, computed by summing + the imValuesArray timeseries (not the weekly cumulative) + - intensity_minutes_goal: weekly goal (typically 150) + """ + return { + "moderate_intensity_minutes": getattr(data, "moderate_minutes", None), + "vigorous_intensity_minutes": getattr(data, "vigorous_minutes", None), + # Daily total computed from timeseries, not the weekly cumulative + "intensity_minutes_total": getattr(data, "daily_total", None), + "intensity_minutes_goal": getattr(data, "week_goal", None), + } + + def _extract_floors_data(self, data: Any) -> Dict[str, Any]: + """Extract floors data.""" + return { + "floors_ascended": getattr(data, "floors_ascended", None), + "floors_descended": getattr(data, "floors_descended", None), + } + def _extract_activity_data(self, data: Any) -> Dict[str, Any]: """Extract activity data from both parsed and raw formats. @@ -422,6 +457,16 @@ def extract_timeseries_data( if spo2_value is not None: timeseries_data.append((timestamp, spo2_value, {})) + elif metric_type == MetricType.INTENSITY_MINUTES: + # 15-minute interval readings from imValuesArray. + # Each entry is [timestamp_ms, intensity_minutes_earned]. 
+ if hasattr(data, "im_values_array") and data.im_values_array: + for reading in data.im_values_array: + if isinstance(reading, (list, tuple)) and len(reading) >= 2: + timestamp, im_value = reading[0], reading[1] + if im_value is not None: + timeseries_data.append((timestamp, int(im_value), {})) + return timeseries_data def _extract_steps_data(self, data: Any) -> Dict[str, Any]: diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index f926ebb..f0118b8 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -35,6 +35,9 @@ class MetricType(Enum): CALORIES = "calories" BODY_COMPOSITION = "body_composition" SPO2 = "spo2" + RESTING_HEART_RATE = "resting_heart_rate" + INTENSITY_MINUTES = "intensity_minutes" + FLOORS = "floors" class TimeSeries(Base): @@ -216,6 +219,22 @@ class DailyHealthMetric(Base): lowest_respiration_value = Column(Float) highest_respiration_value = Column(Float) + # Intensity minutes + # moderate/vigorous are weekly cumulative values from the API + moderate_intensity_minutes = Column(Integer) + vigorous_intensity_minutes = Column(Integer) + # Daily total: sum of 15-min timeseries values (not weekly cumulative) + intensity_minutes_total = Column(Integer) + # Weekly goal (typically 150) + intensity_minutes_goal = Column(Integer) + + # Floors + floors_ascended = Column(Integer) + floors_descended = Column(Integer) + + # Dedicated resting HR (separate from daily_summary/heart_rate sources) + dedicated_resting_heart_rate = Column(Integer) + # Sleep enhancements sleep_score = Column(Integer) # 0-100 overall score sleep_score_qualifier = Column(String) # POOR, FAIR, GOOD, EXCELLENT diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index 03efabb..c238168 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -253,6 +253,7 @@ def _sync_metric_for_date( MetricType.RESPIRATION, MetricType.HRV, MetricType.SPO2, + MetricType.INTENSITY_MINUTES, ]: timeseries_data = 
self.extractor.extract_timeseries_data( data, metric_type @@ -779,6 +780,9 @@ def _store_health_metric( MetricType.STEPS, MetricType.CALORIES, MetricType.SPO2, + MetricType.RESTING_HEART_RATE, + MetricType.INTENSITY_MINUTES, + MetricType.FLOORS, ]: # Store all extracted data for these metrics self.db.store_health_metric(user_id, sync_date, **data) diff --git a/src/garmy/metrics/__init__.py b/src/garmy/metrics/__init__.py index 58f8d0b..24f3658 100644 --- a/src/garmy/metrics/__init__.py +++ b/src/garmy/metrics/__init__.py @@ -83,9 +83,12 @@ from .body_battery import BodyBattery from .calories import Calories from .daily_summary import DailySummary +from .floors import Floors from .heart_rate import HeartRate from .hrv import HRV +from .intensity_minutes import IntensityMinutes from .respiration import Respiration +from .resting_heart_rate import RestingHeartRate from .sleep import Sleep from .spo2 import SpO2 from .steps import Steps @@ -93,13 +96,16 @@ from .training_readiness import TrainingReadiness __all__: List[str] = [ + "Floors", "HRV", "ActivitySummary", "BodyBattery", "Calories", "DailySummary", "HeartRate", + "IntensityMinutes", "Respiration", + "RestingHeartRate", "Sleep", "SpO2", "Steps", diff --git a/src/garmy/metrics/floors.py b/src/garmy/metrics/floors.py new file mode 100644 index 0000000..438b72d --- /dev/null +++ b/src/garmy/metrics/floors.py @@ -0,0 +1,114 @@ +"""Floors metric module. + +This module provides access to Garmin floors climbed data using the +auto-discovery architecture. The API returns a time series of floor values +which must be summed to produce daily totals. + +Data Source: + Garmin Connect API endpoint: /wellness-service/wellness/floorsChartData/daily/{date} +""" + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from ..core.base import MetricConfig +from ..core.utils import camel_to_snake_dict + + +@dataclass +class Floors: + """Daily floors data from Garmin Connect API. 
+ + Floors climbed and descended during the day, computed from the watch + barometer. The API returns an array of readings that must be summed. + + Attributes: + calendar_date: Date string (YYYY-MM-DD) + floors_ascended: Total floors climbed during the day + floors_descended: Total floors descended during the day + + Example: + >>> floors = garmy.floors.get() + >>> print(f"Climbed: {floors.floors_ascended} floors") + >>> print(f"Descended: {floors.floors_descended} floors") + """ + + calendar_date: str = "" + floors_ascended: Optional[int] = None + floors_descended: Optional[int] = None + + +def parse_floors_data(data: Dict[str, Any]) -> Floors: + """Parse floors chart API response into structured data. + + The API returns floorValuesArray with [timestamp_ms, ascended, descended] + triples. Daily totals are computed by summing the array. + """ + snake_dict = camel_to_snake_dict(data) + + if not isinstance(snake_dict, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(snake_dict).__name__}. 
" + f"Raw data: {data}" + ) + + # Extract calendar date from startTimestampGMT or fallback + calendar_date = snake_dict.get("calendar_date", "") + if not calendar_date: + # Try to derive from startTimestampGMT + start_ts = snake_dict.get("start_timestamp_gmt") or snake_dict.get( + "start_timestamp_local" + ) + if start_ts and isinstance(start_ts, str): + # ISO format: take date portion + calendar_date = start_ts[:10] + + # Sum floor values from the array + floor_values = snake_dict.get("floor_values_array") or [] + floors_ascended = None + floors_descended = None + + if floor_values: + total_ascended = 0 + total_descended = 0 + has_data = False + + for entry in floor_values: + if isinstance(entry, (list, tuple)) and len(entry) >= 3: + ascended = entry[1] + descended = entry[2] + try: + if ascended is not None: + total_ascended += int(ascended) + has_data = True + except (ValueError, TypeError): + pass + try: + if descended is not None: + total_descended += int(descended) + has_data = True + except (ValueError, TypeError): + pass + + if has_data: + floors_ascended = total_ascended + floors_descended = total_descended + + return Floors( + calendar_date=calendar_date, + floors_ascended=floors_ascended, + floors_descended=floors_descended, + ) + + +# Declarative configuration for auto-discovery with custom parser +METRIC_CONFIG = MetricConfig( + endpoint="/wellness-service/wellness/floorsChartData/daily/{date}", + metric_class=Floors, + parser=parse_floors_data, + description="Daily floors climbed and descended from barometer data", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/src/garmy/metrics/intensity_minutes.py b/src/garmy/metrics/intensity_minutes.py new file mode 100644 index 0000000..49ee197 --- /dev/null +++ b/src/garmy/metrics/intensity_minutes.py @@ -0,0 +1,117 @@ +"""Intensity Minutes metric module. + +This module provides access to Garmin intensity minutes data using the +auto-discovery architecture. 
The API returns both a 15-minute timeseries +(imValuesArray) and weekly cumulative summary values. + +Data Source: + Garmin Connect API endpoint: /wellness-service/wellness/daily/im/{date} + +Storage strategy: + - Timeseries: imValuesArray stored in the timeseries table at 15-min resolution. + Each value is the intensity minutes earned in that 15-min window. + - Daily summary: intensity_minutes_total is computed as the sum of the + timeseries values (actual minutes earned that day, not weekly cumulative). + - Weekly context: moderate_intensity_minutes and vigorous_intensity_minutes + store weekly cumulative values from the API (useful for weekly goal tracking). + +API field mapping (camelCase → snake_case after camel_to_snake_dict): + moderateMinutes → moderate_minutes (weekly cumulative moderate) + vigorousMinutes → vigorous_minutes (weekly cumulative vigorous) + weeklyTotal → weekly_total (WHO-weighted: moderate + 2*vigorous) + weekGoal → week_goal (weekly goal, typically 150) + imValuesArray → im_values_array (15-min timeseries: [timestamp_ms, value]) +""" + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from ..core.base import MetricConfig +from ..core.utils import camel_to_snake_dict + + +@dataclass +class IntensityMinutes: + """Intensity minutes data from Garmin Connect API. + + Contains both the 15-minute timeseries and weekly cumulative summaries. + Vigorous minutes count double toward the weekly goal (WHO formula). + + Attributes: + calendar_date: Date string (YYYY-MM-DD) + moderate_minutes: Weekly cumulative moderate intensity minutes + vigorous_minutes: Weekly cumulative vigorous intensity minutes + weekly_total: WHO-weighted weekly total (moderate + 2x vigorous) + week_goal: Weekly intensity minutes goal (typically 150) + im_values_array: 15-min timeseries as [timestamp_ms, value] pairs. + Each value is the intensity minutes earned in that window. 
+ + Example: + >>> im = garmy.intensity_minutes.get() + >>> print(f"Weekly total: {im.weekly_total} / {im.week_goal}") + >>> print(f"Today's readings: {im.readings_count}") + """ + + calendar_date: str = "" + moderate_minutes: Optional[int] = None + vigorous_minutes: Optional[int] = None + weekly_total: Optional[int] = None + week_goal: Optional[int] = None + im_values_array: List[List[Any]] = field(default_factory=list) + + @property + def readings_count(self) -> int: + """Get number of 15-minute readings.""" + return len(self.im_values_array) + + @property + def daily_total(self) -> Optional[int]: + """Compute daily intensity minutes earned from the timeseries. + + Sums all non-None values in imValuesArray. Returns None if no + readings are available. + """ + if not self.im_values_array: + return None + total = 0 + has_data = False + for entry in self.im_values_array: + if isinstance(entry, (list, tuple)) and len(entry) >= 2: + value = entry[1] + if value is not None: + total += int(value) + has_data = True + return total if has_data else None + + +def parse_intensity_minutes_data(data: Dict[str, Any]) -> IntensityMinutes: + """Parse intensity minutes API response into structured data.""" + snake_dict = camel_to_snake_dict(data) + + if not isinstance(snake_dict, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(snake_dict).__name__}. 
" + f"Raw data: {data}" + ) + + return IntensityMinutes( + calendar_date=snake_dict.get("calendar_date", ""), + moderate_minutes=snake_dict.get("moderate_minutes"), + vigorous_minutes=snake_dict.get("vigorous_minutes"), + weekly_total=snake_dict.get("weekly_total"), + week_goal=snake_dict.get("week_goal"), + im_values_array=snake_dict.get("im_values_array") or [], + ) + + +# Declarative configuration for auto-discovery with custom parser +METRIC_CONFIG = MetricConfig( + endpoint="/wellness-service/wellness/daily/im/{date}", + metric_class=IntensityMinutes, + parser=parse_intensity_minutes_data, + description="Intensity minutes with 15-min timeseries and weekly cumulative summaries", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/src/garmy/metrics/resting_heart_rate.py b/src/garmy/metrics/resting_heart_rate.py new file mode 100644 index 0000000..dce6447 --- /dev/null +++ b/src/garmy/metrics/resting_heart_rate.py @@ -0,0 +1,92 @@ +"""Resting Heart Rate metric module. + +This module provides access to the dedicated Garmin resting heart rate endpoint +using the auto-discovery architecture. This endpoint provides the single resting +HR value computed each morning, separate from the general heart rate endpoint. + +Data Source: + Garmin Connect API endpoint: + /userstats-service/wellness/daily/{display_name}?fromDate={date}&untilDate={date} +""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from ..core.base import MetricConfig +from ..core.endpoint_builders import build_resting_heart_rate_endpoint + + +@dataclass +class RestingHeartRate: + """Dedicated resting heart rate data from Garmin Connect API. + + The single resting HR value Garmin computes each morning, sourced from + the user stats service rather than the general heart rate endpoint. 
+ + Attributes: + calendar_date: Date string (YYYY-MM-DD) + value: Resting heart rate in bpm + + Example: + >>> rhr = garmy.resting_heart_rate.get() + >>> print(f"Resting HR: {rhr.value} bpm") + """ + + calendar_date: str = "" + value: Optional[int] = None + + +def parse_resting_heart_rate_data(data: Dict[str, Any]) -> RestingHeartRate: + """Parse resting heart rate API response into structured data. + + The API response is deeply nested with uppercase keys + (allMetrics.metricsMap.WELLNESS_RESTING_HEART_RATE), so camel_to_snake_dict + is not used here — the structure requires manual navigation. + """ + if not isinstance(data, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(data).__name__}. " + f"Raw data: {data}" + ) + + calendar_date = "" + value = None + + # Navigate nested response structure + all_metrics = data.get("allMetrics", {}) + metrics_map = ( + all_metrics.get("metricsMap", {}) if isinstance(all_metrics, dict) else {} + ) + rhr_entries = metrics_map.get("WELLNESS_RESTING_HEART_RATE", []) + + if rhr_entries and isinstance(rhr_entries, list): + # Take the first (and typically only) entry + entry = rhr_entries[0] + if isinstance(entry, dict): + calendar_date = entry.get("calendarDate", "") + value = entry.get("value") + if value is not None: + try: + value = int(value) + except (ValueError, TypeError): + value = None + + return RestingHeartRate( + calendar_date=calendar_date, + value=value, + ) + + +# Declarative configuration for auto-discovery with endpoint builder +METRIC_CONFIG = MetricConfig( + endpoint="/userstats-service/wellness/daily", + metric_class=RestingHeartRate, + parser=parse_resting_heart_rate_data, + endpoint_builder=build_resting_heart_rate_endpoint, + requires_user_id=True, + description="Dedicated resting heart rate from user stats service", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/tests/test_daily_aggregate_metrics.py 
b/tests/test_daily_aggregate_metrics.py new file mode 100644 index 0000000..bbfa3e9 --- /dev/null +++ b/tests/test_daily_aggregate_metrics.py @@ -0,0 +1,476 @@ +"""Tests for PR 2 daily aggregate metrics: IntensityMinutes, Floors, RestingHeartRate.""" + +import pytest + +from garmy.core.base import MetricConfig +from garmy.localdb.extractors import DataExtractor +from garmy.localdb.models import MetricType +from garmy.metrics.floors import Floors, parse_floors_data +from garmy.metrics.intensity_minutes import IntensityMinutes, parse_intensity_minutes_data +from garmy.metrics.resting_heart_rate import RestingHeartRate, parse_resting_heart_rate_data + + +# --------------------------------------------------------------------------- +# IntensityMinutes +# --------------------------------------------------------------------------- + + +class TestIntensityMinutesParsing: + """Test IntensityMinutes API response parsing.""" + + def create_sample_response(self, **overrides): + """Create a realistic intensity minutes API response matching actual Garmin API.""" + response = { + "calendarDate": "2026-03-23", + "moderateMinutes": 52, + "vigorousMinutes": 36, + "weeklyTotal": 124, + "weekGoal": 150, + "startDayMinutes": 0, + "endDayMinutes": 124, + "weeklyModerate": 52, + "weeklyVigorous": 36, + "imValuesArray": [ + [1774276200000, 6], + [1774277100000, 21], + [1774278000000, 20], + ], + } + response.update(overrides) + return response + + def test_parse_full_response(self): + data = self.create_sample_response() + result = parse_intensity_minutes_data(data) + + assert isinstance(result, IntensityMinutes) + assert result.calendar_date == "2026-03-23" + assert result.moderate_minutes == 52 + assert result.vigorous_minutes == 36 + assert result.weekly_total == 124 + assert result.week_goal == 150 + assert len(result.im_values_array) == 3 + + def test_parse_empty_response(self): + data = {"calendarDate": "2026-03-24"} + result = parse_intensity_minutes_data(data) + + assert 
result.calendar_date == "2026-03-24" + assert result.moderate_minutes is None + assert result.vigorous_minutes is None + assert result.weekly_total is None + assert result.week_goal is None + assert result.im_values_array == [] + + def test_parse_zero_values(self): + data = self.create_sample_response( + moderateMinutes=0, vigorousMinutes=0, weeklyTotal=0 + ) + result = parse_intensity_minutes_data(data) + + assert result.moderate_minutes == 0 + assert result.vigorous_minutes == 0 + assert result.weekly_total == 0 + + def test_daily_total_property(self): + """Test daily_total computes sum of timeseries values.""" + im = IntensityMinutes( + im_values_array=[[1774276200000, 6], [1774277100000, 21], [1774278000000, 20]] + ) + assert im.daily_total == 47 + + def test_daily_total_empty_array(self): + """Test daily_total returns None when no readings.""" + im = IntensityMinutes(im_values_array=[]) + assert im.daily_total is None + + def test_daily_total_skips_none_values(self): + """Test daily_total skips None values in array.""" + im = IntensityMinutes( + im_values_array=[[1774276200000, 6], [1774277100000, None], [1774278000000, 20]] + ) + assert im.daily_total == 26 + + def test_parse_invalid_data_raises(self): + with pytest.raises(ValueError, match="Expected dictionary"): + parse_intensity_minutes_data("not a dict") + + +class TestIntensityMinutesMetricConfig: + """Test IntensityMinutes metric configuration.""" + + def test_metric_config_exists(self): + from garmy.metrics.intensity_minutes import __metric_config__ + + assert __metric_config__ is not None + assert __metric_config__.endpoint == "/wellness-service/wellness/daily/im/{date}" + assert __metric_config__.metric_class is IntensityMinutes + assert __metric_config__.parser is parse_intensity_minutes_data + assert isinstance(__metric_config__, MetricConfig) + + def test_importable_from_package(self): + from garmy.metrics import IntensityMinutes as IMImport + + assert IMImport is IntensityMinutes + + +class 
TestIntensityMinutesExtraction: + """Test IntensityMinutes data extraction for daily_health_metrics.""" + + def test_extract_intensity_minutes(self): + im = IntensityMinutes( + calendar_date="2026-03-23", + moderate_minutes=52, + vigorous_minutes=36, + weekly_total=124, + week_goal=150, + im_values_array=[[1774276200000, 6], [1774277100000, 21], [1774278000000, 20]], + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(im, MetricType.INTENSITY_MINUTES) + + assert result["moderate_intensity_minutes"] == 52 + assert result["vigorous_intensity_minutes"] == 36 + # intensity_minutes_total is the daily sum from timeseries, not weekly_total + assert result["intensity_minutes_total"] == 47 # 6 + 21 + 20 + assert result["intensity_minutes_goal"] == 150 + + def test_extract_intensity_minutes_none_values(self): + im = IntensityMinutes() + extractor = DataExtractor() + result = extractor.extract_metric_data(im, MetricType.INTENSITY_MINUTES) + + assert result["moderate_intensity_minutes"] is None + assert result["vigorous_intensity_minutes"] is None + assert result["intensity_minutes_total"] is None + assert result["intensity_minutes_goal"] is None + + +class TestIntensityMinutesTimeseriesExtraction: + """Test IntensityMinutes timeseries extraction for localdb storage.""" + + def test_extract_timeseries(self): + """Test timeseries extraction from imValuesArray.""" + im = IntensityMinutes( + im_values_array=[ + [1774276200000, 6], + [1774277100000, 21], + [1774278000000, 20], + ], + ) + + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(im, MetricType.INTENSITY_MINUTES) + + assert len(timeseries) == 3 + ts, value, meta = timeseries[0] + assert ts == 1774276200000 + assert value == 6 + assert meta == {} + + def test_extract_timeseries_skips_none_values(self): + """Test that readings with None value are skipped.""" + im = IntensityMinutes( + im_values_array=[ + [1774276200000, None], + [1774277100000, 21], + ], + ) + + extractor 
= DataExtractor() + timeseries = extractor.extract_timeseries_data(im, MetricType.INTENSITY_MINUTES) + + assert len(timeseries) == 1 + assert timeseries[0][1] == 21 + + def test_extract_timeseries_empty_array(self): + """Test timeseries extraction with no readings.""" + im = IntensityMinutes(im_values_array=[]) + extractor = DataExtractor() + timeseries = extractor.extract_timeseries_data(im, MetricType.INTENSITY_MINUTES) + + assert timeseries == [] + + +# --------------------------------------------------------------------------- +# Floors +# --------------------------------------------------------------------------- + + +class TestFloorsParsing: + """Test Floors API response parsing.""" + + def create_sample_response(self, **overrides): + """Create a realistic floors chart API response.""" + response = { + "startTimestampGMT": "2026-03-24T00:00:00.0", + "endTimestampGMT": "2026-03-25T00:00:00.0", + "floorValuesArray": [ + [1711238400000, 3, 1], + [1711242000000, 2, 0], + [1711245600000, 0, 2], + [1711249200000, 5, 3], + ], + } + response.update(overrides) + return response + + def test_parse_full_response(self): + data = self.create_sample_response() + result = parse_floors_data(data) + + assert isinstance(result, Floors) + assert result.calendar_date == "2026-03-24" + assert result.floors_ascended == 10 # 3 + 2 + 0 + 5 + assert result.floors_descended == 6 # 1 + 0 + 2 + 3 + + def test_parse_empty_array(self): + data = {"floorValuesArray": []} + result = parse_floors_data(data) + + assert result.floors_ascended is None + assert result.floors_descended is None + + def test_parse_missing_array(self): + data = {} + result = parse_floors_data(data) + + assert result.floors_ascended is None + assert result.floors_descended is None + + def test_parse_none_values_in_array(self): + """Test that None values in array entries are handled gracefully.""" + data = { + "floorValuesArray": [ + [1711238400000, 3, None], + [1711242000000, None, 2], + [1711245600000, 1, 1], + ], + 
} + result = parse_floors_data(data) + + assert result.floors_ascended == 4 # 3 + 0 + 1 + assert result.floors_descended == 3 # 0 + 2 + 1 + + def test_parse_malformed_entries_skipped(self): + """Test that entries with fewer than 3 elements are skipped.""" + data = { + "floorValuesArray": [ + [1711238400000, 3, 1], + [1711242000000], # malformed + [1711245600000, 2, 0], + ], + } + result = parse_floors_data(data) + + assert result.floors_ascended == 5 # 3 + 2 + assert result.floors_descended == 1 + + def test_parse_string_values_in_array(self): + """Test that string values in array entries are cast to int.""" + data = { + "floorValuesArray": [ + [1711238400000, "3", "1"], + [1711242000000, "2", "0"], + ], + } + result = parse_floors_data(data) + + assert result.floors_ascended == 5 + assert result.floors_descended == 1 + + def test_parse_invalid_data_raises(self): + with pytest.raises(ValueError, match="Expected dictionary"): + parse_floors_data("not a dict") + + def test_calendar_date_from_timestamp(self): + """Test date extraction from startTimestampGMT.""" + data = { + "startTimestampGMT": "2026-03-24T00:00:00.0", + "floorValuesArray": [[1711238400000, 1, 0]], + } + result = parse_floors_data(data) + assert result.calendar_date == "2026-03-24" + + +class TestFloorsMetricConfig: + """Test Floors metric configuration.""" + + def test_metric_config_exists(self): + from garmy.metrics.floors import __metric_config__ + + assert __metric_config__ is not None + assert ( + __metric_config__.endpoint + == "/wellness-service/wellness/floorsChartData/daily/{date}" + ) + assert __metric_config__.metric_class is Floors + assert __metric_config__.parser is parse_floors_data + assert isinstance(__metric_config__, MetricConfig) + + def test_importable_from_package(self): + from garmy.metrics import Floors as FloorsImport + + assert FloorsImport is Floors + + +class TestFloorsExtraction: + """Test Floors data extraction for daily_health_metrics.""" + + def 
test_extract_floors(self): + floors = Floors( + calendar_date="2026-03-24", + floors_ascended=10, + floors_descended=6, + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(floors, MetricType.FLOORS) + + assert result["floors_ascended"] == 10 + assert result["floors_descended"] == 6 + + def test_extract_floors_none_values(self): + floors = Floors() + extractor = DataExtractor() + result = extractor.extract_metric_data(floors, MetricType.FLOORS) + + assert result["floors_ascended"] is None + assert result["floors_descended"] is None + + +# --------------------------------------------------------------------------- +# RestingHeartRate +# --------------------------------------------------------------------------- + + +class TestRestingHeartRateParsing: + """Test RestingHeartRate API response parsing.""" + + def create_sample_response(self, **overrides): + """Create a realistic user stats API response.""" + response = { + "allMetrics": { + "metricsMap": { + "WELLNESS_RESTING_HEART_RATE": [ + { + "calendarDate": "2026-03-24", + "value": 52.0, + } + ] + } + } + } + # Apply overrides to the nested structure + for key, val in overrides.items(): + response[key] = val + return response + + def test_parse_full_response(self): + data = self.create_sample_response() + result = parse_resting_heart_rate_data(data) + + assert isinstance(result, RestingHeartRate) + assert result.calendar_date == "2026-03-24" + assert result.value == 52 + + def test_parse_empty_metrics_map(self): + data = {"allMetrics": {"metricsMap": {}}} + result = parse_resting_heart_rate_data(data) + + assert result.calendar_date == "" + assert result.value is None + + def test_parse_missing_all_metrics(self): + data = {} + result = parse_resting_heart_rate_data(data) + + assert result.calendar_date == "" + assert result.value is None + + def test_parse_empty_rhr_list(self): + data = { + "allMetrics": { + "metricsMap": { + "WELLNESS_RESTING_HEART_RATE": [] + } + } + } + result = 
parse_resting_heart_rate_data(data) + + assert result.value is None + + def test_parse_null_value(self): + data = { + "allMetrics": { + "metricsMap": { + "WELLNESS_RESTING_HEART_RATE": [ + {"calendarDate": "2026-03-24", "value": None} + ] + } + } + } + result = parse_resting_heart_rate_data(data) + + assert result.value is None + + def test_parse_float_value_converted_to_int(self): + data = { + "allMetrics": { + "metricsMap": { + "WELLNESS_RESTING_HEART_RATE": [ + {"calendarDate": "2026-03-24", "value": 55.0} + ] + } + } + } + result = parse_resting_heart_rate_data(data) + + assert result.value == 55 + assert isinstance(result.value, int) + + def test_parse_invalid_data_raises(self): + with pytest.raises(ValueError, match="Expected dictionary"): + parse_resting_heart_rate_data("not a dict") + + +class TestRestingHeartRateMetricConfig: + """Test RestingHeartRate metric configuration.""" + + def test_metric_config_exists(self): + from garmy.metrics.resting_heart_rate import __metric_config__ + + assert __metric_config__ is not None + assert __metric_config__.metric_class is RestingHeartRate + assert __metric_config__.parser is parse_resting_heart_rate_data + assert __metric_config__.endpoint_builder is not None + assert __metric_config__.requires_user_id is True + assert isinstance(__metric_config__, MetricConfig) + + def test_importable_from_package(self): + from garmy.metrics import RestingHeartRate as RHRImport + + assert RHRImport is RestingHeartRate + + +class TestRestingHeartRateExtraction: + """Test RestingHeartRate data extraction for daily_health_metrics.""" + + def test_extract_resting_heart_rate(self): + rhr = RestingHeartRate( + calendar_date="2026-03-24", + value=52, + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(rhr, MetricType.RESTING_HEART_RATE) + + assert result["dedicated_resting_heart_rate"] == 52 + + def test_extract_resting_heart_rate_none(self): + rhr = RestingHeartRate() + extractor = DataExtractor() + result = 
extractor.extract_metric_data(rhr, MetricType.RESTING_HEART_RATE) + + assert result["dedicated_resting_heart_rate"] is None diff --git a/tests/test_metrics_comprehensive.py b/tests/test_metrics_comprehensive.py index 931e800..444173e 100644 --- a/tests/test_metrics_comprehensive.py +++ b/tests/test_metrics_comprehensive.py @@ -39,8 +39,11 @@ def test_package_imports(self): BodyBattery, Calories, DailySummary, + Floors, HeartRate, + IntensityMinutes, Respiration, + RestingHeartRate, Sleep, SpO2, Steps, @@ -55,8 +58,11 @@ def test_package_imports(self): BodyBattery, Calories, DailySummary, + Floors, HeartRate, + IntensityMinutes, Respiration, + RestingHeartRate, Sleep, SpO2, Steps, @@ -72,13 +78,16 @@ def test_all_exports(self): import garmy.metrics as metrics_module expected_exports = { + "Floors", "HRV", "ActivitySummary", "BodyBattery", "Calories", "DailySummary", "HeartRate", + "IntensityMinutes", "Respiration", + "RestingHeartRate", "Sleep", "SpO2", "Steps", From d88068e3a39224b99202169240ba8d6d00baddb3 Mon Sep 17 00:00:00 2001 From: Jaakko Tiistola Date: Tue, 24 Mar 2026 19:20:22 -0700 Subject: [PATCH 23/26] Add post-activity performance metrics (training status, endurance score) Store training load/status and endurance score in a dedicated performance_metrics table since these update after activities rather than on a fixed daily schedule. Includes new metric classes, extractors, sync routing, MCP tool docs, and database schema documentation. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/database-schema.md | 43 ++- src/garmy/core/endpoint_builders.py | 33 ++ src/garmy/localdb/db.py | 26 ++ src/garmy/localdb/extractors.py | 26 ++ src/garmy/localdb/models.py | 33 ++ src/garmy/localdb/sync.py | 9 + src/garmy/mcp/server.py | 11 +- src/garmy/metrics/__init__.py | 4 + src/garmy/metrics/endurance_score.py | 123 ++++++++ src/garmy/metrics/training_status.py | 168 ++++++++++ tests/test_metrics_comprehensive.py | 2 + tests/test_performance_metrics.py | 439 +++++++++++++++++++++++++++ 12 files changed, 915 insertions(+), 2 deletions(-) create mode 100644 src/garmy/metrics/endurance_score.py create mode 100644 src/garmy/metrics/training_status.py create mode 100644 tests/test_performance_metrics.py diff --git a/docs/database-schema.md b/docs/database-schema.md index 59c7c39..2fb5166 100644 --- a/docs/database-schema.md +++ b/docs/database-schema.md @@ -6,7 +6,7 @@ Complete reference for Garmy's LocalDB database schema and structure. 
The Garmy LocalDB uses SQLite with optimized tables for health data storage: -- **6 main tables** for different data types +- **7 main tables** for different data types - **Normalized structure** for efficient querying - **Dedicated columns** for common health metrics - **Sync tracking** for data integrity @@ -58,6 +58,13 @@ activity_splits (Cardio lap/split data) ├── start_latitude, start_longitude, end_latitude, end_longitude └── intensity_type, created_at +performance_metrics (Post-activity performance metrics) +├── user_id, metric_date (PK) +├── Training Load: acute_load, chronic_load, load_balance, load_type +├── Training Status: training_status (int code), training_status_feedback +├── Endurance: endurance_score, endurance_score_classification (int code) +└── created_at, updated_at + sync_status (Sync tracking) ├── user_id, sync_date, metric_type (PK) ├── status, synced_at @@ -336,6 +343,38 @@ created_at DATETIME -- Record creation time - `failed` - Sync failed with error - `skipped` - No data available or already exists +### `performance_metrics` +**Purpose:** Post-activity performance metrics that update irregularly (after activities, not daily) + +**Primary Key:** `(user_id, metric_date)` + +**Columns:** +```sql +user_id INTEGER -- User identifier +metric_date DATE -- Date of metric +-- Training Load (from acuteTrainingLoadDTO) +acute_load FLOAT -- 7-day rolling training load +chronic_load FLOAT -- 28-day rolling training load +load_balance FLOAT -- Acute/chronic load ratio +load_type TEXT -- OPTIMAL, OVERREACHING, etc. +-- Training Status (numeric code + feedback phrase) +training_status INTEGER -- Numeric code: 1=DETRAINING, 2=RECOVERY, 3=UNPRODUCTIVE, 4=MAINTAINING, 5=PRODUCTIVE, 6=PEAKING, 7=OVERREACHING +training_status_feedback TEXT -- Feedback phrase (e.g. "MAINTAINING_1") +-- Endurance Score +endurance_score FLOAT -- Absolute score (e.g. 
4508) +endurance_score_classification INTEGER -- Numeric code: 1=RECREATIONAL, 2=INTERMEDIATE, 3=TRAINED, 4=WELL_TRAINED, 5=EXPERT, 6=SUPERIOR, 7=ELITE +-- Metadata +created_at DATETIME -- Record creation time +updated_at DATETIME -- Last update time +``` + +**Query Pattern:** Use "last known value" since these update irregularly: +```sql +SELECT * FROM performance_metrics +WHERE user_id = 1 AND metric_date <= '2026-03-24' +ORDER BY metric_date DESC LIMIT 1; +``` + ## 🔍 Common Queries ### Daily Health Trends @@ -547,6 +586,8 @@ Supported metric types in `sync_status` and `timeseries`: - `RESTING_HEART_RATE` - `INTENSITY_MINUTES` - `FLOORS` +- `TRAINING_STATUS` +- `ENDURANCE_SCORE` ## 🔧 Performance Considerations diff --git a/src/garmy/core/endpoint_builders.py b/src/garmy/core/endpoint_builders.py index caf423b..bc3fedd 100644 --- a/src/garmy/core/endpoint_builders.py +++ b/src/garmy/core/endpoint_builders.py @@ -248,3 +248,36 @@ def build_resting_heart_rate_endpoint( """Build resting heart rate endpoint URL.""" builder = UserStatsEndpointBuilder() return builder.build(date_input, api_client, **kwargs) + + +class EnduranceScoreEndpointBuilder(BaseEndpointBuilder): + """Builder for endurance score endpoint (uses query params, no user_id).""" + + def get_endpoint_name(self) -> str: + """Get the name of this endpoint.""" + return "endurance score" + + def build_endpoint_url(self, user_id: str, date_str: str, **kwargs: Any) -> str: + """Build endurance score URL with query parameters.""" + return ( + f"/metrics-service/metrics/endurancescore" + f"?startDate={date_str}&endDate={date_str}&aggregation=daily" + ) + + def build( + self, + date_input: Union["date", str, None] = None, + api_client: Any = None, + **kwargs: Any, + ) -> str: + """Build endpoint URL without requiring user_id.""" + date_str = format_date(date_input) + return self.build_endpoint_url("", date_str, **kwargs) + + +def build_endurance_score_endpoint( + date_input: Union["date", str, None] = None, 
api_client: Any = None, **kwargs: Any +) -> str: + """Build endurance score endpoint URL.""" + builder = EnduranceScoreEndpointBuilder() + return builder.build(date_input, api_client, **kwargs) diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index d4f78c8..b878085 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -15,6 +15,7 @@ DailyHealthMetric, ExerciseSet, MetricType, + PerformanceMetric, SyncStatus, TimeSeries, ) @@ -170,6 +171,7 @@ def validate_schema(self) -> bool: "exercise_sets", "activity_splits", "body_composition", + "performance_metrics", } actual_tables = set(Base.metadata.tables.keys()) return expected_tables.issubset(actual_tables) @@ -246,6 +248,30 @@ def store_health_metric(self, user_id: int, metric_date: date, **kwargs): session.merge(metric) session.commit() + def store_performance_metric(self, user_id: int, metric_date: date, **kwargs): + """Store performance metric data (training load/status, endurance score).""" + with self.get_session() as session: + metric = ( + session.query(PerformanceMetric) + .filter( + and_( + PerformanceMetric.user_id == user_id, + PerformanceMetric.metric_date == metric_date, + ) + ) + .first() + ) + + if metric is None: + metric = PerformanceMetric(user_id=user_id, metric_date=metric_date) + + for field, value in kwargs.items(): + if hasattr(metric, field): + setattr(metric, field, value) + + session.merge(metric) + session.commit() + def create_sync_status( self, user_id: int, diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 27bd29f..16c291a 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -45,6 +45,10 @@ def extract_metric_data( return self._extract_floors_data(data) elif metric_type == MetricType.BODY_COMPOSITION: return self._extract_body_composition_data(data) + elif metric_type == MetricType.TRAINING_STATUS: + return self._extract_training_status_data(data) + elif metric_type == 
MetricType.ENDURANCE_SCORE: + return self._extract_endurance_score_data(data) else: return None @@ -314,6 +318,28 @@ def _extract_floors_data(self, data: Any) -> Dict[str, Any]: "floors_descended": getattr(data, "floors_descended", None), } + def _extract_training_status_data(self, data: Any) -> Dict[str, Any]: + """Extract training status and load data for performance_metrics table.""" + return { + "acute_load": getattr(data, "acute_load", None), + "chronic_load": getattr(data, "chronic_load", None), + "load_balance": getattr(data, "load_balance", None), + "load_type": getattr(data, "load_type", None), + "training_status": getattr(data, "training_status", None), + "training_status_feedback": getattr( + data, "training_status_feedback", None + ), + } + + def _extract_endurance_score_data(self, data: Any) -> Dict[str, Any]: + """Extract endurance score data for performance_metrics table.""" + return { + "endurance_score": getattr(data, "endurance_score", None), + "endurance_score_classification": getattr( + data, "endurance_score_classification", None + ), + } + def _extract_activity_data(self, data: Any) -> Dict[str, Any]: """Extract activity data from both parsed and raw formats. diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index f0118b8..f562189 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -38,6 +38,8 @@ class MetricType(Enum): RESTING_HEART_RATE = "resting_heart_rate" INTENSITY_MINUTES = "intensity_minutes" FLOORS = "floors" + TRAINING_STATUS = "training_status" + ENDURANCE_SCORE = "endurance_score" class TimeSeries(Base): @@ -289,3 +291,34 @@ class BodyComposition(Base): # Metadata source_type = Column(String) # e.g., "INDEX_SCALE" created_at = Column(DateTime, default=datetime.utcnow) + + +class PerformanceMetric(Base): + """Post-activity performance metrics (training load/status, endurance score). + + These metrics update irregularly after activities, not on a fixed daily schedule. 
+ Stored separately from daily_health_metrics to avoid NULLs on rest days. + """ + + __tablename__ = "performance_metrics" + + user_id = Column(Integer, primary_key=True, nullable=False) + metric_date = Column(Date, primary_key=True, nullable=False) + + # Training Load (from acuteTrainingLoadDTO) + acute_load = Column(Float) # 7-day rolling + chronic_load = Column(Float) # 28-day rolling + load_balance = Column(Float) # acute / chronic ratio + load_type = Column(Text) # OPTIMAL, OVERREACHING, etc. + + # Training Status (numeric code + feedback phrase) + training_status = Column(Integer) # Numeric code (see TrainingStatus.STATUS_MAP) + training_status_feedback = Column(Text) # e.g. "MAINTAINING_1" + + # Endurance Score + endurance_score = Column(Float) # Absolute value (e.g. 4508) + endurance_score_classification = Column(Integer) # Numeric code (see EnduranceScore.CLASSIFICATION_MAP) + + # Metadata + created_at = Column(DateTime, default=datetime.utcnow) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index c238168..d851ee7 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -754,10 +754,19 @@ def _get_cardio_activities_without_splits( return [self.db._activity_to_dict(a) for a in activities] + # Performance metrics stored in separate table (update after activities, not daily) + PERFORMANCE_METRIC_TYPES = { + MetricType.TRAINING_STATUS, + MetricType.ENDURANCE_SCORE, + } + def _store_health_metric( self, user_id: int, sync_date: date, metric_type: MetricType, data: Dict ): """Store health metric data in normalized table.""" + if metric_type in self.PERFORMANCE_METRIC_TYPES: + self.db.store_performance_metric(user_id, sync_date, **data) + return if metric_type == MetricType.DAILY_SUMMARY: self.db.store_health_metric(user_id, sync_date, **data) elif metric_type == MetricType.SLEEP: diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py 
index 50299f5..a2777f1 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -479,7 +479,8 @@ def sync_health_data( metrics: Comma-separated list of metrics to sync (default: all). Available: DAILY_SUMMARY, SLEEP, HEART_RATE, STEPS, STRESS, BODY_BATTERY, HRV, CALORIES, RESPIRATION, TRAINING_READINESS, - ACTIVITIES, BODY_COMPOSITION + ACTIVITIES, BODY_COMPOSITION, SPO2, RESTING_HEART_RATE, + INTENSITY_MINUTES, FLOORS, TRAINING_STATUS, ENDURANCE_SCORE user_id: User ID for database records (default: 1) resync_days: Force re-sync of the last N days even if already completed. Useful for updating partial data from earlier syncs (default: 0, max: 7) @@ -1194,6 +1195,7 @@ def _get_table_description(table_name: str) -> str: "timeseries": "High-frequency data like heart rate readings throughout the day, stress levels, body battery", "activities": "Individual workouts and physical activities with performance metrics", "sync_status": "System table tracking data synchronization status (usually not needed for health analysis)", + "performance_metrics": "Post-activity performance metrics: training load/status, endurance score (updated after activities, not daily)", } return descriptions.get(table_name, "Health data table") @@ -1229,6 +1231,13 @@ def _get_health_data_guide() -> str: **CONTAINS**: heart rate readings, stress measurements, body battery levels with timestamps **USE CASE**: Detailed intraday analysis +### performance_metrics +**WHAT**: Post-activity computed performance metrics +**CONTAINS**: training load (acute/chronic), training status, endurance score with classification +**NOTE**: Updated after activities, not daily. 
Use "last known value" pattern: +- Latest values: `SELECT * FROM performance_metrics WHERE user_id = 1 AND metric_date <= date('now') ORDER BY metric_date DESC LIMIT 1` +- Training status trend: `SELECT metric_date, training_status, load_type FROM performance_metrics WHERE training_status IS NOT NULL ORDER BY metric_date` + ## Health Metrics Available - **Steps & Movement**: total_steps, total_distance_meters - **Sleep**: sleep_duration_hours, deep_sleep_hours, rem_sleep_hours diff --git a/src/garmy/metrics/__init__.py b/src/garmy/metrics/__init__.py index 24f3658..4883948 100644 --- a/src/garmy/metrics/__init__.py +++ b/src/garmy/metrics/__init__.py @@ -94,8 +94,11 @@ from .steps import Steps from .stress import Stress from .training_readiness import TrainingReadiness +from .training_status import TrainingStatus +from .endurance_score import EnduranceScore __all__: List[str] = [ + "EnduranceScore", "Floors", "HRV", "ActivitySummary", @@ -111,4 +114,5 @@ "Steps", "Stress", "TrainingReadiness", + "TrainingStatus", ] diff --git a/src/garmy/metrics/endurance_score.py b/src/garmy/metrics/endurance_score.py new file mode 100644 index 0000000..5ca9f78 --- /dev/null +++ b/src/garmy/metrics/endurance_score.py @@ -0,0 +1,123 @@ +"""Endurance Score metric module. + +This module provides access to Garmin endurance score data including the +absolute score and classification level. + +Data Source: + Garmin Connect API endpoint: + /metrics-service/metrics/endurancescore?startDate={date}&endDate={date}&aggregation=daily + +API Response Structure: + Flat dict (NOT wrapped in enduranceScoreDTO): + - overallScore: absolute endurance score (e.g. 4508) + - classification: numeric code (1-7) + - calendarDate, feedbackPhrase, contributors, etc. 
+""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from ..core.base import MetricConfig +from ..core.endpoint_builders import build_endurance_score_endpoint + + +# Garmin endurance score classification numeric code -> label mapping +# Derived from API response field names: classificationLowerLimitIntermediate, +# classificationLowerLimitTrained, etc. +CLASSIFICATION_MAP: Dict[int, str] = { + 1: "RECREATIONAL", + 2: "INTERMEDIATE", + 3: "TRAINED", + 4: "WELL_TRAINED", + 5: "EXPERT", + 6: "SUPERIOR", + 7: "ELITE", +} + + +@dataclass +class EnduranceScore: + """Endurance score data from Garmin Connect API. + + Post-activity metric that provides an overall endurance score with + a classification level. Updated after long efforts. + + Attributes: + calendar_date: Date string (YYYY-MM-DD) + endurance_score: Absolute endurance score (e.g. 4508) + endurance_score_classification: Numeric classification code (see CLASSIFICATION_MAP) + + Example: + >>> es = garmy.endurance_score.get() + >>> print(f"Score: {es.endurance_score} ({es.classification_label})") + """ + + calendar_date: str = "" + endurance_score: Optional[float] = None + endurance_score_classification: Optional[int] = None + + @property + def classification_label(self) -> Optional[str]: + """Resolve numeric classification to human-readable label.""" + if self.endurance_score_classification is None: + return None + return CLASSIFICATION_MAP.get( + self.endurance_score_classification, + f"UNKNOWN_{self.endurance_score_classification}", + ) + + +def parse_endurance_score_data(data: Dict[str, Any]) -> EnduranceScore: + """Parse endurance score API response into structured data. + + The response is a flat dict with overallScore and classification fields. + """ + if not isinstance(data, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(data).__name__}. 
" + f"Raw data: {data}" + ) + + calendar_date = data.get("calendarDate", "") + endurance_score = _to_float(data.get("overallScore")) + endurance_score_classification = _to_int(data.get("classification")) + + return EnduranceScore( + calendar_date=calendar_date, + endurance_score=endurance_score, + endurance_score_classification=endurance_score_classification, + ) + + +def _to_float(value: Any) -> Optional[float]: + """Safely convert a value to float.""" + if value is None: + return None + try: + return float(value) + except (ValueError, TypeError): + return None + + +def _to_int(value: Any) -> Optional[int]: + """Safely convert a value to int.""" + if value is None: + return None + try: + return int(value) + except (ValueError, TypeError): + return None + + +# Declarative configuration for auto-discovery with endpoint builder +METRIC_CONFIG = MetricConfig( + endpoint="/metrics-service/metrics/endurancescore", + metric_class=EnduranceScore, + parser=parse_endurance_score_data, + endpoint_builder=build_endurance_score_endpoint, + description="Endurance score with classification level", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/src/garmy/metrics/training_status.py b/src/garmy/metrics/training_status.py new file mode 100644 index 0000000..77f6ab9 --- /dev/null +++ b/src/garmy/metrics/training_status.py @@ -0,0 +1,168 @@ +"""Training Status metric module. + +This module provides access to Garmin training status and training load data +from a single API endpoint. Covers both the training load balance (acute vs +chronic load) and the overall training status assessment. 
+ +Data Source: + Garmin Connect API endpoint: + /metrics-service/metrics/trainingstatus/aggregated/{date} + +API Response Structure: + The response nests data under device IDs: + - mostRecentTrainingStatus.latestTrainingStatusData.{deviceId} + - trainingStatus (int), trainingStatusFeedbackPhrase (str) + - acuteTrainingLoadDTO: dailyTrainingLoadAcute, dailyTrainingLoadChronic, + dailyAcuteChronicWorkloadRatio, acwrStatus +""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from ..core.base import MetricConfig + + +# Garmin training status numeric code -> label mapping +STATUS_MAP: Dict[int, str] = { + 0: "NOT_APPLICABLE", + 1: "DETRAINING", + 2: "RECOVERY", + 3: "UNPRODUCTIVE", + 4: "MAINTAINING", + 5: "PRODUCTIVE", + 6: "PEAKING", + 7: "OVERREACHING", +} + + +@dataclass +class TrainingStatus: + """Training status and load data from Garmin Connect API. + + Post-activity metric combining training load balance (acute/chronic) and + overall training status. Updated after activities are synced. + + Attributes: + calendar_date: Date string (YYYY-MM-DD) + acute_load: 7-day rolling training load + chronic_load: 28-day rolling training load + load_balance: Acute/chronic load ratio + load_type: Load classification (OPTIMAL, OVERREACHING, etc.) 
+ training_status: Numeric status code (see STATUS_MAP) + training_status_feedback: Feedback phrase from API + + Example: + >>> ts = garmy.training_status.get() + >>> print(f"Status: {ts.status_label}") + >>> print(f"Load balance: {ts.load_balance:.2f}") + """ + + calendar_date: str = "" + acute_load: Optional[float] = None + chronic_load: Optional[float] = None + load_balance: Optional[float] = None + load_type: Optional[str] = None + training_status: Optional[int] = None + training_status_feedback: Optional[str] = None + + @property + def status_label(self) -> Optional[str]: + """Resolve numeric training status to human-readable label.""" + if self.training_status is None: + return None + return STATUS_MAP.get(self.training_status, f"UNKNOWN_{self.training_status}") + + +def _get_first_device_data(nested: Any) -> Optional[Dict[str, Any]]: + """Extract first device's data from a {deviceId: data} dict.""" + if not isinstance(nested, dict): + return None + for value in nested.values(): + if isinstance(value, dict): + return value + return None + + +def parse_training_status_data(data: Dict[str, Any]) -> TrainingStatus: + """Parse training status API response into structured data. + + Actual API nests data under device IDs: + - mostRecentTrainingStatus.latestTrainingStatusData.{deviceId} + - mostRecentTrainingStatus.latestTrainingStatusData.{deviceId}.acuteTrainingLoadDTO + """ + if not isinstance(data, dict): + raise ValueError( + f"Expected dictionary from API response but got {type(data).__name__}. 
" + f"Raw data: {data}" + ) + + calendar_date = "" + training_status = None + training_status_feedback = None + acute_load = None + chronic_load = None + load_balance = None + load_type = None + + # Navigate: mostRecentTrainingStatus.latestTrainingStatusData.{deviceId} + mrt = data.get("mostRecentTrainingStatus") or {} + status_map = mrt.get("latestTrainingStatusData") or {} + status_data = _get_first_device_data(status_map) + + if status_data: + calendar_date = status_data.get("calendarDate", "") + training_status = _to_int(status_data.get("trainingStatus")) + training_status_feedback = status_data.get("trainingStatusFeedbackPhrase") + + # Extract load from nested acuteTrainingLoadDTO + load_dto = status_data.get("acuteTrainingLoadDTO") or {} + if isinstance(load_dto, dict): + acute_load = _to_float(load_dto.get("dailyTrainingLoadAcute")) + chronic_load = _to_float(load_dto.get("dailyTrainingLoadChronic")) + load_balance = _to_float( + load_dto.get("dailyAcuteChronicWorkloadRatio") + ) + load_type = load_dto.get("acwrStatus") + + return TrainingStatus( + calendar_date=calendar_date, + acute_load=acute_load, + chronic_load=chronic_load, + load_balance=load_balance, + load_type=load_type, + training_status=training_status, + training_status_feedback=training_status_feedback, + ) + + +def _to_float(value: Any) -> Optional[float]: + """Safely convert a value to float.""" + if value is None: + return None + try: + return float(value) + except (ValueError, TypeError): + return None + + +def _to_int(value: Any) -> Optional[int]: + """Safely convert a value to int.""" + if value is None: + return None + try: + return int(value) + except (ValueError, TypeError): + return None + + +# Declarative configuration for auto-discovery +METRIC_CONFIG = MetricConfig( + endpoint="/metrics-service/metrics/trainingstatus/aggregated/{date}", + metric_class=TrainingStatus, + parser=parse_training_status_data, + description="Training status and load balance (acute/chronic load, status 
assessment)", + version="1.0", +) + +# Export for auto-discovery +__metric_config__ = METRIC_CONFIG diff --git a/tests/test_metrics_comprehensive.py b/tests/test_metrics_comprehensive.py index 444173e..21d79af 100644 --- a/tests/test_metrics_comprehensive.py +++ b/tests/test_metrics_comprehensive.py @@ -78,6 +78,7 @@ def test_all_exports(self): import garmy.metrics as metrics_module expected_exports = { + "EnduranceScore", "Floors", "HRV", "ActivitySummary", @@ -93,6 +94,7 @@ def test_all_exports(self): "Steps", "Stress", "TrainingReadiness", + "TrainingStatus", } actual_exports = set(metrics_module.__all__) diff --git a/tests/test_performance_metrics.py b/tests/test_performance_metrics.py new file mode 100644 index 0000000..53e1e06 --- /dev/null +++ b/tests/test_performance_metrics.py @@ -0,0 +1,439 @@ +"""Tests for PR3 performance metrics: TrainingStatus, EnduranceScore.""" + +import pytest + +from garmy.core.base import MetricConfig +from garmy.localdb.extractors import DataExtractor +from garmy.localdb.models import MetricType, PerformanceMetric +from garmy.metrics.endurance_score import ( + CLASSIFICATION_MAP, + EnduranceScore, + parse_endurance_score_data, +) +from garmy.metrics.training_status import ( + STATUS_MAP, + TrainingStatus, + parse_training_status_data, +) + + +# --------------------------------------------------------------------------- +# TrainingStatus +# --------------------------------------------------------------------------- + + +class TestTrainingStatusParsing: + """Test TrainingStatus API response parsing with actual API structure.""" + + def create_sample_response(self, **overrides): + """Create a realistic training status API response matching actual Garmin API.""" + response = { + "userId": 123456, + "mostRecentTrainingStatus": { + "latestTrainingStatusData": { + "3456789": { + "calendarDate": "2026-03-24", + "deviceId": "3456789", + "trainingStatus": 4, + "trainingStatusFeedbackPhrase": "MAINTAINING_1", + "trainingPaused": False, + 
"acuteTrainingLoadDTO": { + "acwrPercent": 38, + "acwrStatus": "OPTIMAL", + "dailyTrainingLoadAcute": 345.0, + "dailyTrainingLoadChronic": 375.0, + "dailyAcuteChronicWorkloadRatio": 0.92, + }, + } + } + }, + "mostRecentTrainingLoadBalance": {}, + "heatAltitudeAcclimationDTO": None, + } + response.update(overrides) + return response + + def test_parse_full_response(self): + data = self.create_sample_response() + result = parse_training_status_data(data) + + assert isinstance(result, TrainingStatus) + assert result.calendar_date == "2026-03-24" + assert result.training_status == 4 + assert result.training_status_feedback == "MAINTAINING_1" + assert result.acute_load == 345.0 + assert result.chronic_load == 375.0 + assert result.load_balance == 0.92 + assert result.load_type == "OPTIMAL" + + def test_parse_empty_response(self): + data = {} + result = parse_training_status_data(data) + + assert result.calendar_date == "" + assert result.acute_load is None + assert result.chronic_load is None + assert result.load_balance is None + assert result.load_type is None + assert result.training_status is None + assert result.training_status_feedback is None + + def test_parse_missing_training_status(self): + """Test parsing when mostRecentTrainingStatus is missing.""" + data = {"userId": 123456} + result = parse_training_status_data(data) + + assert result.training_status is None + assert result.acute_load is None + + def test_parse_empty_device_map(self): + """Test parsing when device map is empty.""" + data = { + "mostRecentTrainingStatus": { + "latestTrainingStatusData": {} + } + } + result = parse_training_status_data(data) + + assert result.training_status is None + + def test_parse_no_load_dto(self): + """Test parsing when acuteTrainingLoadDTO is missing.""" + data = { + "mostRecentTrainingStatus": { + "latestTrainingStatusData": { + "3456789": { + "calendarDate": "2026-03-24", + "trainingStatus": 5, + "trainingStatusFeedbackPhrase": "PRODUCTIVE_1", + } + } + } + } + 
result = parse_training_status_data(data) + + assert result.training_status == 5 + assert result.training_status_feedback == "PRODUCTIVE_1" + assert result.acute_load is None + assert result.chronic_load is None + + def test_parse_invalid_data_raises(self): + with pytest.raises(ValueError, match="Expected dictionary"): + parse_training_status_data("not a dict") + + def test_status_label_property(self): + ts = TrainingStatus(training_status=4) + assert ts.status_label == "MAINTAINING" + + def test_status_label_productive(self): + ts = TrainingStatus(training_status=5) + assert ts.status_label == "PRODUCTIVE" + + def test_status_label_none(self): + ts = TrainingStatus() + assert ts.status_label is None + + def test_status_label_unknown(self): + ts = TrainingStatus(training_status=99) + assert ts.status_label == "UNKNOWN_99" + + def test_status_map_has_expected_statuses(self): + """Verify STATUS_MAP contains all known statuses from Garmin.""" + expected = { + "DETRAINING", + "RECOVERY", + "MAINTAINING", + "PRODUCTIVE", + "OVERREACHING", + } + actual_labels = set(STATUS_MAP.values()) + assert expected.issubset(actual_labels) + + +class TestTrainingStatusMetricConfig: + """Test TrainingStatus metric configuration.""" + + def test_metric_config_exists(self): + from garmy.metrics.training_status import __metric_config__ + + assert __metric_config__ is not None + assert ( + __metric_config__.endpoint + == "/metrics-service/metrics/trainingstatus/aggregated/{date}" + ) + assert __metric_config__.metric_class is TrainingStatus + assert __metric_config__.parser is parse_training_status_data + assert isinstance(__metric_config__, MetricConfig) + + def test_importable_from_package(self): + from garmy.metrics import TrainingStatus as Imported + + assert Imported is TrainingStatus + + +class TestTrainingStatusExtraction: + """Test TrainingStatus data extraction for performance_metrics table.""" + + def test_extract_training_status(self): + ts = TrainingStatus( + 
calendar_date="2026-03-24", + acute_load=345.0, + chronic_load=375.0, + load_balance=0.92, + load_type="OPTIMAL", + training_status=4, + training_status_feedback="MAINTAINING_1", + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(ts, MetricType.TRAINING_STATUS) + + assert result["acute_load"] == 345.0 + assert result["chronic_load"] == 375.0 + assert result["load_balance"] == 0.92 + assert result["load_type"] == "OPTIMAL" + assert result["training_status"] == 4 + assert result["training_status_feedback"] == "MAINTAINING_1" + + def test_extract_training_status_none_values(self): + ts = TrainingStatus() + extractor = DataExtractor() + result = extractor.extract_metric_data(ts, MetricType.TRAINING_STATUS) + + assert result["acute_load"] is None + assert result["chronic_load"] is None + assert result["load_balance"] is None + assert result["load_type"] is None + assert result["training_status"] is None + assert result["training_status_feedback"] is None + + +# --------------------------------------------------------------------------- +# EnduranceScore +# --------------------------------------------------------------------------- + + +class TestEnduranceScoreParsing: + """Test EnduranceScore API response parsing with actual API structure.""" + + def create_sample_response(self, **overrides): + """Create a realistic endurance score API response (flat dict).""" + response = { + "userProfilePK": 123456, + "deviceId": "3456789", + "calendarDate": "2026-03-24", + "overallScore": 4508, + "classification": 1, + "feedbackPhrase": "20", + "primaryTrainingDevice": True, + "gaugeLowerLimit": 0, + "gaugeUpperLimit": 10000, + "classificationLowerLimitIntermediate": 3846, + "classificationLowerLimitTrained": 5246, + "classificationLowerLimitWellTrained": 6616, + "classificationLowerLimitExpert": 7776, + "classificationLowerLimitSuperior": 8396, + "classificationLowerLimitElite": 9016, + "contributors": [ + {"activityTypeId": 13, "contribution": 66.09}, + 
{"group": 1, "contribution": 22.3}, + ], + } + response.update(overrides) + return response + + def test_parse_full_response(self): + data = self.create_sample_response() + result = parse_endurance_score_data(data) + + assert isinstance(result, EnduranceScore) + assert result.calendar_date == "2026-03-24" + assert result.endurance_score == 4508.0 + assert result.endurance_score_classification == 1 + + def test_parse_empty_response(self): + data = {} + result = parse_endurance_score_data(data) + + assert result.calendar_date == "" + assert result.endurance_score is None + assert result.endurance_score_classification is None + + def test_parse_missing_classification(self): + data = {"calendarDate": "2026-03-24", "overallScore": 5000} + result = parse_endurance_score_data(data) + + assert result.endurance_score == 5000.0 + assert result.endurance_score_classification is None + + def test_parse_invalid_data_raises(self): + with pytest.raises(ValueError, match="Expected dictionary"): + parse_endurance_score_data("not a dict") + + def test_classification_label_property(self): + es = EnduranceScore(endurance_score_classification=1) + assert es.classification_label == "RECREATIONAL" + + def test_classification_label_elite(self): + es = EnduranceScore(endurance_score_classification=7) + assert es.classification_label == "ELITE" + + def test_classification_label_none(self): + es = EnduranceScore() + assert es.classification_label is None + + def test_classification_label_unknown(self): + es = EnduranceScore(endurance_score_classification=99) + assert es.classification_label == "UNKNOWN_99" + + def test_classification_map_has_expected_levels(self): + """Verify CLASSIFICATION_MAP has all levels from API field names.""" + expected = { + "RECREATIONAL", + "INTERMEDIATE", + "TRAINED", + "WELL_TRAINED", + "EXPERT", + "SUPERIOR", + "ELITE", + } + assert set(CLASSIFICATION_MAP.values()) == expected + + +class TestEnduranceScoreMetricConfig: + """Test EnduranceScore metric 
configuration.""" + + def test_metric_config_exists(self): + from garmy.metrics.endurance_score import __metric_config__ + + assert __metric_config__ is not None + assert __metric_config__.endpoint == "/metrics-service/metrics/endurancescore" + assert __metric_config__.metric_class is EnduranceScore + assert __metric_config__.parser is parse_endurance_score_data + assert isinstance(__metric_config__, MetricConfig) + + def test_has_endpoint_builder(self): + from garmy.metrics.endurance_score import __metric_config__ + + assert __metric_config__.endpoint_builder is not None + + def test_importable_from_package(self): + from garmy.metrics import EnduranceScore as Imported + + assert Imported is EnduranceScore + + +class TestEnduranceScoreExtraction: + """Test EnduranceScore data extraction for performance_metrics table.""" + + def test_extract_endurance_score(self): + es = EnduranceScore( + calendar_date="2026-03-24", + endurance_score=4508.0, + endurance_score_classification=1, + ) + + extractor = DataExtractor() + result = extractor.extract_metric_data(es, MetricType.ENDURANCE_SCORE) + + assert result["endurance_score"] == 4508.0 + assert result["endurance_score_classification"] == 1 + + def test_extract_endurance_score_none_values(self): + es = EnduranceScore() + extractor = DataExtractor() + result = extractor.extract_metric_data(es, MetricType.ENDURANCE_SCORE) + + assert result["endurance_score"] is None + assert result["endurance_score_classification"] is None + + +# --------------------------------------------------------------------------- +# Endpoint Builder +# --------------------------------------------------------------------------- + + +class TestEnduranceScoreEndpointBuilder: + """Test EnduranceScore endpoint builder.""" + + def test_build_endpoint_url(self): + from garmy.core.endpoint_builders import build_endurance_score_endpoint + + url = build_endurance_score_endpoint("2026-03-24") + + assert "/metrics-service/metrics/endurancescore" in url + assert 
"startDate=2026-03-24" in url + assert "endDate=2026-03-24" in url + assert "aggregation=daily" in url + + def test_build_from_date_object(self): + from datetime import date + + from garmy.core.endpoint_builders import build_endurance_score_endpoint + + url = build_endurance_score_endpoint(date(2026, 3, 24)) + + assert "startDate=2026-03-24" in url + assert "endDate=2026-03-24" in url + + def test_no_user_id_in_url(self): + """Verify endpoint builder does not require API client / user_id.""" + from garmy.core.endpoint_builders import build_endurance_score_endpoint + + url = build_endurance_score_endpoint("2026-03-24", api_client=None) + assert "endurancescore" in url + + +# --------------------------------------------------------------------------- +# MetricType Enum +# --------------------------------------------------------------------------- + + +class TestMetricTypeEntries: + """Test new MetricType enum entries exist.""" + + def test_training_status_enum(self): + assert MetricType.TRAINING_STATUS.value == "training_status" + + def test_endurance_score_enum(self): + assert MetricType.ENDURANCE_SCORE.value == "endurance_score" + + +# --------------------------------------------------------------------------- +# PerformanceMetric Model +# --------------------------------------------------------------------------- + + +class TestPerformanceMetricModel: + """Test PerformanceMetric SQLAlchemy model.""" + + def test_table_name(self): + assert PerformanceMetric.__tablename__ == "performance_metrics" + + def test_has_training_load_columns(self): + columns = {c.name for c in PerformanceMetric.__table__.columns} + assert "acute_load" in columns + assert "chronic_load" in columns + assert "load_balance" in columns + assert "load_type" in columns + + def test_has_training_status_columns(self): + columns = {c.name for c in PerformanceMetric.__table__.columns} + assert "training_status" in columns + assert "training_status_feedback" in columns + + def 
test_has_endurance_score_columns(self): + columns = {c.name for c in PerformanceMetric.__table__.columns} + assert "endurance_score" in columns + assert "endurance_score_classification" in columns + + def test_no_vo2_max_columns(self): + """VO2 Max was removed — verify columns don't exist.""" + columns = {c.name for c in PerformanceMetric.__table__.columns} + assert "vo2_max_running" not in columns + assert "vo2_max_cycling" not in columns + assert "fitness_age" not in columns + + def test_primary_key(self): + pk_columns = {c.name for c in PerformanceMetric.__table__.primary_key.columns} + assert pk_columns == {"user_id", "metric_date"} From b32647eee1cd49a46201b8c500c5b1092942b548 Mon Sep 17 00:00:00 2001 From: Jaakko Tiistola Date: Tue, 24 Mar 2026 19:54:54 -0700 Subject: [PATCH 24/26] Update README metrics table and package docstring with all PR1-PR3 metrics Backfill metrics from PR1 (SpO2, HRV) and PR2 (resting HR, intensity minutes, floors) that were missing from the README table, and add PR3 metrics (training status, endurance score). Also update the package docstring in metrics/__init__.py to list all new metric classes. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- README.md | 8 ++++++++ src/garmy/metrics/__init__.py | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/README.md b/README.md index ab8ba43..bb79578 100644 --- a/README.md +++ b/README.md @@ -180,6 +180,14 @@ Garmy provides access to a comprehensive set of Garmin Connect metrics: | `training_readiness` | Training readiness scores and factors | `api_client.metrics.get('training_readiness').get()` | | `body_battery` | Body battery energy levels | `api_client.metrics.get('body_battery').get()` | | `activities` | Activity summaries and details | `api_client.metrics.get('activities').list(days=30)` | +| `hrv` | Heart rate variability with sleep readings | `api_client.metrics.get('hrv').get()` | +| `spo2` | Blood oxygen saturation with hourly averages | `api_client.metrics.get('spo2').get()` | +| `respiration` | Breathing rate (waking and sleep) | `api_client.metrics.get('respiration').get()` | +| `resting_heart_rate` | Dedicated resting HR from user stats | `api_client.metrics.get('resting_heart_rate').get()` | +| `intensity_minutes` | Moderate/vigorous intensity with timeseries | `api_client.metrics.get('intensity_minutes').get()` | +| `floors` | Floors climbed and descended | `api_client.metrics.get('floors').get()` | +| `training_status` | Training status, load balance (acute/chronic) | `api_client.metrics.get('training_status').get()` | +| `endurance_score` | Endurance score with classification | `api_client.metrics.get('endurance_score').get()` | ## 🧑‍💻 Architecture Overview diff --git a/src/garmy/metrics/__init__.py b/src/garmy/metrics/__init__.py index 4883948..cd089b1 100644 --- a/src/garmy/metrics/__init__.py +++ b/src/garmy/metrics/__init__.py @@ -16,6 +16,12 @@ Calories: Daily calorie data including burned, active, BMR, and goal tracking DailySummary: Comprehensive daily summary with all major health metrics in one place Activities: Activity summaries with type, duration, heart rate, and basic 
performance data + SpO2: Blood oxygen saturation with hourly average readings + RestingHeartRate: Dedicated resting heart rate from user stats service + IntensityMinutes: Moderate/vigorous intensity minutes with 15-min timeseries + Floors: Floors climbed and descended throughout the day + TrainingStatus: Training status, acute/chronic load balance + EnduranceScore: Endurance score with classification level Modern API Usage: >>> from garmy import AuthClient, APIClient From 48bbb791df7b531a54f635ba6efb9b36e9292599 Mon Sep 17 00:00:00 2001 From: Jaakko Tiistola Date: Tue, 24 Mar 2026 20:39:32 -0700 Subject: [PATCH 25/26] Clarify timeseries timestamp is epoch ms in MCP docs and SQL examples Co-Authored-By: Claude Opus 4.6 (1M context) --- src/garmy/mcp/server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index a2777f1..bfe74cb 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -325,7 +325,7 @@ def execute_sql_query( - Health metrics: "SELECT metric_date, sleep_duration_hours FROM daily_health_metrics WHERE user_id = 1 ORDER BY metric_date DESC LIMIT 10" - Activities: "SELECT activity_date, activity_name, duration_seconds FROM activities WHERE user_id = 1" - High step days: "SELECT metric_date, total_steps FROM daily_health_metrics WHERE total_steps > 10000" - - Timeseries data: "SELECT timestamp, value FROM timeseries WHERE metric_type = 'heart_rate'" + - Timeseries data (timestamp is epoch ms): "SELECT datetime(timestamp/1000, 'unixepoch', 'localtime') as ts, value FROM timeseries WHERE metric_type = 'heart_rate' AND timestamp/1000 BETWEEN unixepoch('2026-01-01', 'localtime') AND unixepoch('2026-01-02', 'localtime')" Returns: List of matching records as dictionaries @@ -1192,7 +1192,7 @@ def _get_table_description(table_name: str) -> str: """Get human-readable description for table.""" descriptions = { "daily_health_metrics": "Daily health summaries including steps, sleep, 
heart rate, stress, and other key metrics", - "timeseries": "High-frequency data like heart rate readings throughout the day, stress levels, body battery", + "timeseries": "High-frequency data like heart rate readings throughout the day, stress levels, body battery. IMPORTANT: timestamp column is epoch milliseconds (not seconds) — use datetime(timestamp/1000, 'unixepoch', 'localtime') for date filtering and display", "activities": "Individual workouts and physical activities with performance metrics", "sync_status": "System table tracking data synchronization status (usually not needed for health analysis)", "performance_metrics": "Post-activity performance metrics: training load/status, endurance score (updated after activities, not daily)", @@ -1229,6 +1229,7 @@ def _get_health_data_guide() -> str: ### timeseries **WHAT**: High-frequency data throughout the day **CONTAINS**: heart rate readings, stress measurements, body battery levels with timestamps +**IMPORTANT**: The `timestamp` column stores epoch milliseconds (not seconds). Use `datetime(timestamp/1000, 'unixepoch', 'localtime')` for human-readable times and `timestamp/1000 BETWEEN unixepoch('2026-01-01', 'localtime') AND unixepoch('2026-01-02', 'localtime')` for date filtering. **USE CASE**: Detailed intraday analysis ### performance_metrics From c763d8d712b2c69795a31f00b7df8adbe5a29b53 Mon Sep 17 00:00:00 2001 From: Daniel Velazco Date: Thu, 7 May 2026 12:18:54 -0700 Subject: [PATCH 26/26] feat: add health snapshot support across api, localdb, and mcp Adds typed access to Garmin Health Snapshot recordings (the on-demand ~2-minute multi-metric reading captured on the watch) via a new GraphQL endpoint on connectapi.garmin.com/graphql-gateway/graphql. - New HealthSnapshot/HealthSnapshotSummary/HealthSnapshotZone dataclasses and HealthSnapshotAccessor exposed as client.health_snapshots, with raw/get/range/latest/for_date methods. Range queries auto-chunk past the API's 31-day cap. 
- LocalDB: HEALTH_SNAPSHOT MetricType plus three tables (health_snapshots, health_snapshot_summaries, health_snapshot_zones) with merge-based upsert. SyncManager routes HEALTH_SNAPSHOT through a dedicated batch path, matching the body-composition pattern. - MCP: table descriptions and health-data guide updated; HEALTH_SNAPSHOT added to the sync_health_data tool's available metrics list. ECG was also investigated but no public Garmin Connect endpoint could be found via probing or community sources, so it is not included here. --- src/garmy/core/client.py | 24 ++ src/garmy/localdb/db.py | 103 +++++++ src/garmy/localdb/extractors.py | 75 +++++ src/garmy/localdb/models.py | 65 ++++ src/garmy/localdb/sync.py | 86 +++++- src/garmy/mcp/server.py | 17 +- src/garmy/metrics/__init__.py | 8 + src/garmy/metrics/health_snapshot.py | 428 ++++++++++++++++++++++++++ tests/test_localdb_health_snapshot.py | 345 +++++++++++++++++++++ tests/test_metrics_comprehensive.py | 3 + tests/test_metrics_health_snapshot.py | 325 +++++++++++++++++++ 11 files changed, 1476 insertions(+), 3 deletions(-) create mode 100644 src/garmy/metrics/health_snapshot.py create mode 100644 tests/test_localdb_health_snapshot.py create mode 100644 tests/test_metrics_health_snapshot.py diff --git a/src/garmy/core/client.py b/src/garmy/core/client.py index e8f3700..c7272a5 100644 --- a/src/garmy/core/client.py +++ b/src/garmy/core/client.py @@ -23,6 +23,7 @@ if TYPE_CHECKING: from ..auth.client import AuthClient + from ..metrics.health_snapshot import HealthSnapshotAccessor from ..workouts.client import WorkoutClient from .registry import MetricRegistry @@ -307,6 +308,29 @@ def workouts(self) -> "WorkoutClient": self._workouts = WorkoutClient(self) return self._workouts + @property + def health_snapshots(self) -> "HealthSnapshotAccessor": + """Get the Health Snapshot accessor. 
+ + Provides lazy-loaded access to Garmin Health Snapshot recordings, + the on-demand ~2-minute multi-metric measurements taken on a compatible + watch. Uses the GraphQL endpoint and supports range-based queries. + + Returns: + HealthSnapshotAccessor instance for fetching snapshots. + + Example: + >>> client = APIClient(auth_client=auth) + >>> recent = client.health_snapshots.latest(days=30) + >>> for snap in recent: + ... print(snap.calendar_date, snap.heart_rate.avg_value) + """ + if not hasattr(self, "_health_snapshots"): + from ..metrics.health_snapshot import HealthSnapshotAccessor + + self._health_snapshots = HealthSnapshotAccessor(self) + return self._health_snapshots + def get_user_profile(self) -> Dict[str, Any]: """Get user profile information from the API. diff --git a/src/garmy/localdb/db.py b/src/garmy/localdb/db.py index 760b947..70f8821 100644 --- a/src/garmy/localdb/db.py +++ b/src/garmy/localdb/db.py @@ -14,6 +14,9 @@ BodyComposition, DailyHealthMetric, ExerciseSet, + HealthSnapshotRecord, + HealthSnapshotSummaryStat, + HealthSnapshotZoneTime, MetricType, PerformanceMetric, SyncStatus, @@ -926,6 +929,106 @@ def body_composition_exists(self, user_id: int, sample_pk: str) -> bool: is not None ) + def store_health_snapshot( + self, + user_id: int, + record: Dict[str, Any], + summaries: List[Dict[str, Any]], + zones: List[Dict[str, Any]], + ) -> None: + """Store a single Health Snapshot and its related summary/zone rows. + + Uses session.merge() so re-syncing the same activity_uuid upserts. + + Args: + user_id: User identifier. + record: Top-level snapshot dict (matches HealthSnapshotRecord columns, + without user_id which is added here). + summaries: List of 6 summary dicts (HealthSnapshotSummaryStat rows). + zones: List of 6 zone dicts (HealthSnapshotZoneTime rows). 
+ """ + activity_uuid = record.get("activity_uuid") + if not activity_uuid: + return + + with self.get_session() as session: + def _parse_ts(value: Any) -> Optional[datetime]: + if value is None or isinstance(value, datetime): + return value + if isinstance(value, str): + try: + return datetime.fromisoformat(value.replace("Z", "+00:00")) + except (ValueError, TypeError): + return None + return None + + cal_date = record.get("calendar_date") + if isinstance(cal_date, str): + try: + cal_date = date.fromisoformat(cal_date) + except (ValueError, TypeError): + cal_date = None + + snap = HealthSnapshotRecord( + user_id=user_id, + activity_uuid=activity_uuid, + calendar_date=cal_date, + start_timestamp_gmt=_parse_ts(record.get("start_timestamp_gmt")), + start_timestamp_local=_parse_ts(record.get("start_timestamp_local")), + end_timestamp_gmt=_parse_ts(record.get("end_timestamp_gmt")), + end_timestamp_local=_parse_ts(record.get("end_timestamp_local")), + wellness_activity_type=record.get("wellness_activity_type"), + notes=record.get("notes"), + rule_pk=record.get("rule_pk"), + user_profile_pk=record.get("user_profile_pk"), + device_meta_data=record.get("device_meta_data"), + ) + session.merge(snap) + + for s in summaries: + if s.get("activity_uuid") != activity_uuid: + continue + session.merge( + HealthSnapshotSummaryStat( + user_id=user_id, + activity_uuid=activity_uuid, + summary_type=s.get("summary_type", ""), + min_value=s.get("min_value"), + max_value=s.get("max_value"), + avg_value=s.get("avg_value", 0.0), + ) + ) + + for z in zones: + if z.get("activity_uuid") != activity_uuid: + continue + session.merge( + HealthSnapshotZoneTime( + user_id=user_id, + activity_uuid=activity_uuid, + zone_number=z.get("zone_number", 0), + millis_in_zone=z.get("millis_in_zone", 0), + zone_low_boundary=z.get("zone_low_boundary", 0), + ) + ) + + session.commit() + + def health_snapshot_exists(self, user_id: int, activity_uuid: str) -> bool: + """Check if a Health Snapshot with this 
activity_uuid is already stored.""" + with self.get_session() as session: + return ( + session.query(HealthSnapshotRecord) + .filter( + and_( + HealthSnapshotRecord.user_id == user_id, + HealthSnapshotRecord.activity_uuid == activity_uuid, + ) + ) + .first() + is not None + ) + def _body_composition_to_dict(self, bc: BodyComposition) -> Dict[str, Any]: """Convert BodyComposition to dictionary.""" return { diff --git a/src/garmy/localdb/extractors.py b/src/garmy/localdb/extractors.py index 16c291a..8fe3672 100644 --- a/src/garmy/localdb/extractors.py +++ b/src/garmy/localdb/extractors.py @@ -702,6 +702,81 @@ def calculate_splits_summary(self, splits: List[Dict[str, Any]]) -> Dict[str, An "avg_pace_min_km": avg_pace_min_km, } + def extract_health_snapshots( + self, snapshots: List[Any] + ) -> Dict[str, List[Dict[str, Any]]]: + """Extract DB-ready rows for the three Health Snapshot tables. + + Args: + snapshots: List of HealthSnapshot dataclass instances (from + APIClient.health_snapshots.range / latest / for_date). + + Returns: + Dict with three keys: + - "records": one dict per snapshot, for the health_snapshots table + - "summaries": 6 dicts per snapshot, for health_snapshot_summaries + - "zones": 6 dicts per snapshot, for health_snapshot_zones + + All dicts are keyed by activity_uuid (caller adds user_id). 
+ """ + records: List[Dict[str, Any]] = [] + summaries: List[Dict[str, Any]] = [] + zones: List[Dict[str, Any]] = [] + + for snap in snapshots: + uuid = getattr(snap, "activity_uuid", None) + if not uuid: + continue + + cal_date = getattr(snap, "calendar_date_obj", None) + if cal_date is None: + date_str = getattr(snap, "calendar_date", "") + try: + cal_date = date.fromisoformat(date_str) if date_str else None + except (ValueError, TypeError): + cal_date = None + + records.append( + { + "activity_uuid": uuid, + "calendar_date": cal_date, + "start_timestamp_gmt": getattr(snap, "start_timestamp_gmt", None), + "start_timestamp_local": getattr(snap, "start_timestamp_local", None), + "end_timestamp_gmt": getattr(snap, "end_timestamp_gmt", None), + "end_timestamp_local": getattr(snap, "end_timestamp_local", None), + "wellness_activity_type": getattr( + snap, "wellness_activity_type", None + ), + "notes": getattr(snap, "notes", None), + "rule_pk": getattr(snap, "rule_pk", None), + "user_profile_pk": getattr(snap, "user_profile_pk", None), + "device_meta_data": getattr(snap, "device_meta_data", None), + } + ) + + for summary in getattr(snap, "summaries", []) or []: + summaries.append( + { + "activity_uuid": uuid, + "summary_type": getattr(summary, "summary_type", ""), + "min_value": getattr(summary, "min_value", None), + "max_value": getattr(summary, "max_value", None), + "avg_value": getattr(summary, "avg_value", 0.0), + } + ) + + for zone in getattr(snap, "time_in_zone", []) or []: + zones.append( + { + "activity_uuid": uuid, + "zone_number": getattr(zone, "zone_number", 0), + "millis_in_zone": getattr(zone, "millis_in_zone", 0), + "zone_low_boundary": getattr(zone, "zone_low_boundary", 0), + } + ) + + return {"records": records, "summaries": summaries, "zones": zones} + def _extract_body_composition_data(self, data: Dict) -> List[Dict[str, Any]]: """Extract body composition entries from weight service response. 
diff --git a/src/garmy/localdb/models.py b/src/garmy/localdb/models.py index f562189..2247827 100644 --- a/src/garmy/localdb/models.py +++ b/src/garmy/localdb/models.py @@ -40,6 +40,7 @@ class MetricType(Enum): FLOORS = "floors" TRAINING_STATUS = "training_status" ENDURANCE_SCORE = "endurance_score" + HEALTH_SNAPSHOT = "health_snapshot" class TimeSeries(Base): @@ -322,3 +323,67 @@ class PerformanceMetric(Base): # Metadata created_at = Column(DateTime, default=datetime.utcnow) updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + +class HealthSnapshotRecord(Base): + """Health Snapshot recording (~2-min on-demand multi-metric measurement). + + One row per snapshot. Each snapshot has 6 related summary stats (in + health_snapshot_summaries) and 6 zone time entries (in health_snapshot_zones), + keyed by (user_id, activity_uuid). + """ + + __tablename__ = "health_snapshots" + + user_id = Column(Integer, primary_key=True, nullable=False) + activity_uuid = Column(String, primary_key=True, nullable=False) + calendar_date = Column(Date, nullable=False, index=True) + + start_timestamp_gmt = Column(DateTime) + start_timestamp_local = Column(DateTime) + end_timestamp_gmt = Column(DateTime) + end_timestamp_local = Column(DateTime) + + wellness_activity_type = Column(String) # "HEALTH_MONITORING" + notes = Column(Text) + rule_pk = Column(Integer) + user_profile_pk = Column(Integer) + device_meta_data = Column(JSON) + + created_at = Column(DateTime, default=datetime.utcnow) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + +class HealthSnapshotSummaryStat(Base): + """Per-metric summary stat for a single Health Snapshot. + + summary_type values: HEART_RATE, RESPIRATION, STRESS, SPO2, RMSSD_HRV, SDRR_HRV. + HEART_RATE / RESPIRATION / STRESS / SPO2 carry min/max/avg. + RMSSD_HRV / SDRR_HRV only carry avg_value (min/max are NULL). 
+ """ + + __tablename__ = "health_snapshot_summaries" + + user_id = Column(Integer, primary_key=True, nullable=False) + activity_uuid = Column(String, primary_key=True, nullable=False) + summary_type = Column(String, primary_key=True, nullable=False) + + min_value = Column(Float) + max_value = Column(Float) + avg_value = Column(Float, nullable=False) + + +class HealthSnapshotZoneTime(Base): + """Per-zone time-in-zone for a single Health Snapshot. + + Each snapshot reports time spent in 6 heart-rate zones (0..5). + """ + + __tablename__ = "health_snapshot_zones" + + user_id = Column(Integer, primary_key=True, nullable=False) + activity_uuid = Column(String, primary_key=True, nullable=False) + zone_number = Column(Integer, primary_key=True, nullable=False) + + millis_in_zone = Column(Integer) + zone_low_boundary = Column(Integer) diff --git a/src/garmy/localdb/sync.py b/src/garmy/localdb/sync.py index d851ee7..5831c83 100644 --- a/src/garmy/localdb/sync.py +++ b/src/garmy/localdb/sync.py @@ -137,14 +137,19 @@ def sync_range( metrics = list(MetricType) # Separate special metrics from regular date-by-date metrics - # Activities and body composition are handled separately + # Activities, body composition, and health snapshots are handled separately non_activities_metrics = [ m for m in metrics - if m not in (MetricType.ACTIVITIES, MetricType.BODY_COMPOSITION) + if m not in ( + MetricType.ACTIVITIES, + MetricType.BODY_COMPOSITION, + MetricType.HEALTH_SNAPSHOT, + ) ] has_activities = MetricType.ACTIVITIES in metrics has_body_composition = MetricType.BODY_COMPOSITION in metrics + has_health_snapshot = MetricType.HEALTH_SNAPSHOT in metrics # Calculate total tasks for progress reporting total_tasks = date_count * len(non_activities_metrics) @@ -152,6 +157,8 @@ def sync_range( total_tasks += date_count if has_body_composition: total_tasks += 1 # Body composition is a single batch operation + if has_health_snapshot: + total_tasks += 1 # Health snapshots is a single batch 
operation self.progress.start_sync(total_tasks) @@ -189,6 +196,10 @@ def sync_range( if has_body_composition: self._sync_body_composition_batch(user_id, start_date, end_date, stats) + # Sync health snapshots (single batch for entire range) + if has_health_snapshot: + self._sync_health_snapshot_batch(user_id, start_date, end_date, stats) + except Exception as e: raise finally: @@ -530,6 +541,77 @@ def _sync_body_composition_batch( self.progress.error(f"Body composition sync failed: {e}") stats["failed"] += 1 + def _sync_health_snapshot_batch( + self, user_id: int, start_date: date, end_date: date, stats: Dict[str, int] + ) -> None: + """Sync Health Snapshots for entire date range via the GraphQL accessor. + + Health Snapshots use a GraphQL POST endpoint that accepts a date range + directly (capped at ~31 days per call; the accessor auto-chunks). + + Args: + user_id: User identifier + start_date: Start of sync range (inclusive) + end_date: End of sync range (inclusive) + stats: Stats dictionary to update + """ + if not self.api_client: + self.progress.error("API client not initialized") + stats["failed"] += 1 + return + + try: + self.progress.info( + f"Syncing health snapshots for {start_date} to {end_date}" + ) + + snapshots = self.api_client.health_snapshots.range(start_date, end_date) + + if not snapshots: + self.progress.info("No health snapshots found in range") + return + + extracted = self.extractor.extract_health_snapshots(snapshots) + records = extracted.get("records", []) + summaries = extracted.get("summaries", []) + zones = extracted.get("zones", []) + + stored = 0 + skipped = 0 + for record in records: + activity_uuid = record.get("activity_uuid") + if not activity_uuid: + continue + + if self.db.health_snapshot_exists(user_id, activity_uuid): + skipped += 1 + continue + + snap_summaries = [ + s for s in summaries if s.get("activity_uuid") == activity_uuid + ] + snap_zones = [ + z for z in zones if z.get("activity_uuid") == activity_uuid + ] + + 
self.db.store_health_snapshot( + user_id, record, snap_summaries, snap_zones + ) + stored += 1 + + stats["completed"] += stored + stats["skipped"] += skipped + + self.progress.info( + f"Health snapshots: stored {stored}, skipped {skipped} existing" + ) + + time.sleep(self.config.sync.rate_limit_delay) + + except Exception as e: + self.progress.error(f"Health snapshot sync failed: {e}") + stats["failed"] += 1 + def backfill_activity_details( self, user_id: int, limit: int = 100 ) -> Dict[str, int]: diff --git a/src/garmy/mcp/server.py b/src/garmy/mcp/server.py index bfe74cb..6fb67f4 100644 --- a/src/garmy/mcp/server.py +++ b/src/garmy/mcp/server.py @@ -480,7 +480,8 @@ def sync_health_data( Available: DAILY_SUMMARY, SLEEP, HEART_RATE, STEPS, STRESS, BODY_BATTERY, HRV, CALORIES, RESPIRATION, TRAINING_READINESS, ACTIVITIES, BODY_COMPOSITION, SPO2, RESTING_HEART_RATE, - INTENSITY_MINUTES, FLOORS, TRAINING_STATUS, ENDURANCE_SCORE + INTENSITY_MINUTES, FLOORS, TRAINING_STATUS, ENDURANCE_SCORE, + HEALTH_SNAPSHOT user_id: User ID for database records (default: 1) resync_days: Force re-sync of the last N days even if already completed. Useful for updating partial data from earlier syncs (default: 0, max: 7) @@ -1196,6 +1197,9 @@ def _get_table_description(table_name: str) -> str: "activities": "Individual workouts and physical activities with performance metrics", "sync_status": "System table tracking data synchronization status (usually not needed for health analysis)", "performance_metrics": "Post-activity performance metrics: training load/status, endurance score (updated after activities, not daily)", + "health_snapshots": "Health Snapshot recordings — on-demand ~2-minute multi-metric measurements taken on a Garmin watch (HR, respiration, stress, SpO2, HRV). One row per snapshot with timing, device metadata, and calendar_date. Sparse — typically a handful per month.", + "health_snapshot_summaries": "Per-metric summary stats for each health snapshot. 
summary_type values: HEART_RATE, RESPIRATION, STRESS, SPO2 (each with min/max/avg) plus RMSSD_HRV, SDRR_HRV (avg only — min/max are NULL). Join to health_snapshots on (user_id, activity_uuid).", + "health_snapshot_zones": "Per-zone time-in-zone for each health snapshot. zone_number 0..5 with millis_in_zone (time spent in zone) and zone_low_boundary (HR threshold for the lower bound). Join to health_snapshots on (user_id, activity_uuid).", } return descriptions.get(table_name, "Health data table") @@ -1239,6 +1243,17 @@ def _get_health_data_guide() -> str: - Latest values: `SELECT * FROM performance_metrics WHERE user_id = 1 AND metric_date <= date('now') ORDER BY metric_date DESC LIMIT 1` - Training status trend: `SELECT metric_date, training_status, load_type FROM performance_metrics WHERE training_status IS NOT NULL ORDER BY metric_date` +### health_snapshots / health_snapshot_summaries / health_snapshot_zones +**WHAT**: Health Snapshot recordings — on-demand ~2-minute multi-metric measurements taken on a Garmin watch +**CONTAINS**: HR, respiration, stress, SpO2 (min/max/avg) plus RMSSD/SDRR HRV (avg) plus per-zone time +**JOIN KEY**: All three tables key on `(user_id, activity_uuid)` +**NOTE**: These are sparse events (typically a few per month, when the user manually starts a snapshot on the watch). Sync via `sync_health_data` with `metrics='HEALTH_SNAPSHOT'`. 
+**COMMON QUERIES**: +- Recent snapshots: `SELECT calendar_date, start_timestamp_local FROM health_snapshots WHERE user_id = 1 ORDER BY calendar_date DESC LIMIT 10` +- HRV trend across snapshots: `SELECT s.calendar_date, h.avg_value FROM health_snapshots s JOIN health_snapshot_summaries h USING (user_id, activity_uuid) WHERE h.summary_type = 'RMSSD_HRV' AND s.user_id = 1 ORDER BY s.calendar_date` +- All metrics for one snapshot: `SELECT summary_type, min_value, max_value, avg_value FROM health_snapshot_summaries WHERE user_id = 1 AND activity_uuid = ?` +- Snapshot HR zone time: `SELECT zone_number, millis_in_zone/1000.0 as seconds_in_zone, zone_low_boundary FROM health_snapshot_zones WHERE user_id = 1 AND activity_uuid = ? ORDER BY zone_number` + ## Health Metrics Available - **Steps & Movement**: total_steps, total_distance_meters - **Sleep**: sleep_duration_hours, deep_sleep_hours, rem_sleep_hours diff --git a/src/garmy/metrics/__init__.py b/src/garmy/metrics/__init__.py index cd089b1..29d62a6 100644 --- a/src/garmy/metrics/__init__.py +++ b/src/garmy/metrics/__init__.py @@ -90,6 +90,11 @@ from .calories import Calories from .daily_summary import DailySummary from .floors import Floors +from .health_snapshot import ( + HealthSnapshot, + HealthSnapshotSummary, + HealthSnapshotZone, +) from .heart_rate import HeartRate from .hrv import HRV from .intensity_minutes import IntensityMinutes @@ -111,6 +116,9 @@ "BodyBattery", "Calories", "DailySummary", + "HealthSnapshot", + "HealthSnapshotSummary", + "HealthSnapshotZone", "HeartRate", "IntensityMinutes", "Respiration", diff --git a/src/garmy/metrics/health_snapshot.py b/src/garmy/metrics/health_snapshot.py new file mode 100644 index 0000000..60c13bd --- /dev/null +++ b/src/garmy/metrics/health_snapshot.py @@ -0,0 +1,428 @@ +""" +Health Snapshot metric module. 
+ +This module provides access to Garmin Health Snapshot recordings, the on-demand +~2-minute multi-metric measurement (heart rate, respiration, stress, SpO2, HRV) +taken via the watch. + +Health Snapshot uses a GraphQL POST endpoint on +`connectapi.garmin.com/graphql-gateway/graphql` and is range-based rather than +date-based. The Garmin GraphQL gateway enforces a maximum range of ~31 days per +call; the accessor's `range()` method auto-chunks larger windows. + +Note: Health Snapshot uses a custom accessor class (HealthSnapshotAccessor) instead +of the standard MetricAccessor because it has a different API pattern (GraphQL POST, +range-based instead of date-based, and requires response unwrapping). +""" + +from dataclasses import dataclass, field +from datetime import date, datetime, timedelta +from typing import Any, Dict, List, Optional, Union + +from ..core.exceptions import MetricDataError +from ..core.utils import format_date + +DateInput = Union[date, str, None] + + +@dataclass +class HealthSnapshotSummary: + """Per-metric summary stat for a single Health Snapshot recording. + + Each Health Snapshot includes 6 summary entries, one per measured metric. + HEART_RATE, RESPIRATION, STRESS, and SPO2 carry min/max/avg values. + RMSSD_HRV and SDRR_HRV only carry avg_value (min/max are None). + + Attributes: + summary_type: One of HEART_RATE, RESPIRATION, STRESS, SPO2, RMSSD_HRV, SDRR_HRV + min_value: Minimum value during the recording (None for HRV summary types) + max_value: Maximum value during the recording (None for HRV summary types) + avg_value: Average value during the recording + """ + + summary_type: str + avg_value: float + min_value: Optional[float] = None + max_value: Optional[float] = None + + +@dataclass +class HealthSnapshotZone: + """Per-zone time-in-zone for a single Health Snapshot recording. + + Each Health Snapshot reports time spent in 6 heart-rate zones (0..5). 
+ + Attributes: + zone_number: Zone index (0..5) + millis_in_zone: Time spent in the zone, in milliseconds + zone_low_boundary: Heart-rate threshold (BPM) for the lower bound of the zone + """ + + zone_number: int + millis_in_zone: int + zone_low_boundary: int + + +@dataclass +class HealthSnapshot: + """Single Health Snapshot recording from Garmin Connect. + + A Health Snapshot is a ~2-minute on-demand measurement initiated from a + compatible Garmin watch. It captures HR, respiration, stress, SpO2, and + HRV (both RMSSD and SDRR) summary stats plus heart-rate zone time. + + Attributes: + activity_uuid: Unique identifier for this snapshot recording + calendar_date: ISO date string (YYYY-MM-DD) for the snapshot + wellness_activity_type: Always "HEALTH_MONITORING" in observed data + summaries: Per-metric summary stats (length 6) + time_in_zone: Per-zone time data (length 6, zones 0..5) + user_profile_pk: User profile primary key + start_timestamp_gmt: Snapshot start time in GMT (ISO string) + end_timestamp_gmt: Snapshot end time in GMT (ISO string) + start_timestamp_local: Snapshot start time in local timezone (ISO string) + end_timestamp_local: Snapshot end time in local timezone (ISO string) + rule_pk: Optional rule identifier + notes: Optional user-provided notes + device_meta_data: Optional device metadata (watch model, firmware, etc.) + + Example: + >>> snapshots = api_client.health_snapshots.latest(days=30) + >>> for snap in snapshots: + ... print(f"{snap.calendar_date}: HR avg={snap.heart_rate.avg_value}, " + ... 
f"HRV={snap.rmssd_hrv.avg_value}ms") + """ + + activity_uuid: str + calendar_date: str + wellness_activity_type: str = "HEALTH_MONITORING" + summaries: List[HealthSnapshotSummary] = field(default_factory=list) + time_in_zone: List[HealthSnapshotZone] = field(default_factory=list) + user_profile_pk: Optional[int] = None + start_timestamp_gmt: Optional[str] = None + end_timestamp_gmt: Optional[str] = None + start_timestamp_local: Optional[str] = None + end_timestamp_local: Optional[str] = None + rule_pk: Optional[int] = None + notes: Optional[str] = None + device_meta_data: Optional[Dict[str, Any]] = None + + def _summary_by_type(self, summary_type: str) -> Optional[HealthSnapshotSummary]: + for s in self.summaries: + if s.summary_type == summary_type: + return s + return None + + @property + def heart_rate(self) -> Optional[HealthSnapshotSummary]: + """Heart rate summary (min/max/avg in BPM) for this snapshot.""" + return self._summary_by_type("HEART_RATE") + + @property + def respiration(self) -> Optional[HealthSnapshotSummary]: + """Respiration rate summary (min/max/avg in breaths/min).""" + return self._summary_by_type("RESPIRATION") + + @property + def stress(self) -> Optional[HealthSnapshotSummary]: + """Stress level summary (min/max/avg, 0-100 scale).""" + return self._summary_by_type("STRESS") + + @property + def spo2(self) -> Optional[HealthSnapshotSummary]: + """SpO2 summary (min/max/avg, percentage).""" + return self._summary_by_type("SPO2") + + @property + def rmssd_hrv(self) -> Optional[HealthSnapshotSummary]: + """RMSSD HRV summary (avg only, in milliseconds).""" + return self._summary_by_type("RMSSD_HRV") + + @property + def sdrr_hrv(self) -> Optional[HealthSnapshotSummary]: + """SDRR HRV summary (avg only, in milliseconds).""" + return self._summary_by_type("SDRR_HRV") + + @property + def calendar_date_obj(self) -> Optional[date]: + """Parse calendar_date into a date object, or None if unparseable.""" + try: + return 
datetime.strptime(self.calendar_date, "%Y-%m-%d").date() + except (ValueError, TypeError): + return None + + +def _extract_uuid(raw_uuid: Any) -> str: + """Extract a UUID string from Garmin's wrapped activityUuid format. + + Garmin returns activityUuid as a dict like {"uuid": "abc-..."}. Bare strings + are also accepted as a fallback. + """ + if isinstance(raw_uuid, dict): + uuid_val = raw_uuid.get("uuid") + if isinstance(uuid_val, str): + return uuid_val + return str(uuid_val) if uuid_val is not None else "" + if isinstance(raw_uuid, str): + return raw_uuid + return "" + + +def _parse_summary_item(item: Dict[str, Any]) -> HealthSnapshotSummary: + return HealthSnapshotSummary( + summary_type=item.get("summaryType", ""), + avg_value=float(item.get("avgValue", 0.0)), + min_value=( + float(item["minValue"]) if item.get("minValue") is not None else None + ), + max_value=( + float(item["maxValue"]) if item.get("maxValue") is not None else None + ), + ) + + +def _parse_zone_item(item: Dict[str, Any]) -> HealthSnapshotZone: + return HealthSnapshotZone( + zone_number=int(item.get("zoneNumber", 0)), + millis_in_zone=int(item.get("millisInZone", 0)), + zone_low_boundary=int(item.get("zoneLowBoundary", 0)), + ) + + +def _parse_single_snapshot(item: Dict[str, Any]) -> HealthSnapshot: + return HealthSnapshot( + activity_uuid=_extract_uuid(item.get("activityUuid")), + calendar_date=item.get("calendarDate", ""), + wellness_activity_type=item.get("wellnessActivityType", "HEALTH_MONITORING"), + summaries=[ + _parse_summary_item(s) for s in (item.get("summaryTypeDataList") or []) + ], + time_in_zone=[ + _parse_zone_item(z) for z in (item.get("timeInZoneList") or []) + ], + user_profile_pk=item.get("userProfilePk"), + start_timestamp_gmt=item.get("startTimestampGMT"), + end_timestamp_gmt=item.get("endTimestampGMT"), + start_timestamp_local=item.get("startTimestampLocal"), + end_timestamp_local=item.get("endTimestampLocal"), + rule_pk=item.get("rulePK"), + notes=item.get("notes"), + 
device_meta_data=item.get("deviceMetaData"), + ) + + +def parse_health_snapshots(raw_items: List[Dict[str, Any]]) -> List[HealthSnapshot]: + """Parse the list inside data.healthSnapshotScalar into typed HealthSnapshot objects. + + Args: + raw_items: List of snapshot dicts, as returned by the GraphQL endpoint + under `data.healthSnapshotScalar`. + + Returns: + List of HealthSnapshot dataclasses. + """ + if not raw_items: + return [] + return [_parse_single_snapshot(item) for item in raw_items if isinstance(item, dict)] + + +class HealthSnapshotAccessor: + """Custom accessor for the Health Snapshot GraphQL endpoint. + + Health Snapshot data is fetched via POST to Garmin's GraphQL gateway + (/graphql-gateway/graphql on connectapi.garmin.com). The endpoint accepts + a date range and returns all snapshots within that range. The Garmin + gateway enforces a maximum range of ~31 days per call; this accessor's + `range()` method transparently chunks larger windows. + + Example: + >>> from garmy import AuthClient, APIClient + >>> api = APIClient(auth_client=AuthClient()) + >>> snapshots = api.health_snapshots.latest(days=30) + >>> for s in snapshots: + ... print(s.calendar_date, s.heart_rate.avg_value) + """ + + GRAPHQL_PATH = "/graphql-gateway/graphql" + MAX_RANGE_DAYS = 31 + + def __init__(self, api_client: Any) -> None: + """Initialize the accessor. + + Args: + api_client: APIClient instance for making authenticated requests. + """ + self.api_client = api_client + + def _build_query(self, start_date_str: str, end_date_str: str) -> Dict[str, str]: + return { + "query": ( + "query{healthSnapshotScalar(" + f'startDate:"{start_date_str}",' + f'endDate:"{end_date_str}"' + ")}" + ) + } + + def raw(self, start_date: DateInput, end_date: DateInput) -> Dict[str, Any]: + """Fetch the raw GraphQL response for a Health Snapshot date range. + + Args: + start_date: Range start (date, ISO string, or None for today). + end_date: Range end (date, ISO string, or None for today). 
+ + Returns: + Raw response dict with keys "data" and possibly "errors". + + Raises: + APIError: If the HTTP request fails (non-2xx response). + """ + start_str = format_date(start_date) + end_str = format_date(end_date) + body = self._build_query(start_str, end_str) + resp = self.api_client.request( + "POST", + "connectapi", + self.GRAPHQL_PATH, + api=True, + json=body, + ) + try: + data = resp.json() + except ValueError as exc: + raise MetricDataError( + f"Health Snapshot response was not valid JSON: {exc}" + ) from exc + if not isinstance(data, dict): + raise MetricDataError( + f"Expected dict from GraphQL response, got {type(data).__name__}" + ) + return data + + def get( + self, start_date: DateInput, end_date: DateInput + ) -> List[HealthSnapshot]: + """Fetch parsed snapshots for a single ≤31-day window. + + For ranges longer than 31 days, use `range()` instead. + + Args: + start_date: Range start (inclusive). + end_date: Range end (inclusive). + + Returns: + List of HealthSnapshot objects (may be empty). + + Raises: + APIError: If the HTTP request fails. + MetricDataError: If the GraphQL response contains errors or is + shaped unexpectedly. + """ + data = self.raw(start_date, end_date) + if "errors" in data and data["errors"]: + first_error = data["errors"][0] + msg = ( + first_error.get("message", "unknown GraphQL error") + if isinstance(first_error, dict) + else str(first_error) + ) + raise MetricDataError(f"Health Snapshot GraphQL error: {msg}") + payload = data.get("data") or {} + items = payload.get("healthSnapshotScalar") or [] + if not isinstance(items, list): + raise MetricDataError( + f"Expected list at data.healthSnapshotScalar, got {type(items).__name__}" + ) + return parse_health_snapshots(items) + + def range( + self, start_date: DateInput, end_date: DateInput + ) -> List[HealthSnapshot]: + """Fetch parsed snapshots for an arbitrary range, chunking ≤31-day windows. + + Snapshots are deduplicated by activity_uuid in case windows overlap. 
+
+        Args:
+            start_date: Range start (inclusive).
+            end_date: Range end (inclusive).
+
+        Returns:
+            List of HealthSnapshot objects covering the full range, sorted by
+            start_timestamp_gmt ascending (snapshots lacking a timestamp sort first).
+        """
+        start = self._coerce_date(start_date)
+        end = self._coerce_date(end_date)
+        if start > end:
+            return []
+        seen: Dict[str, HealthSnapshot] = {}
+        chunk_start = start
+        while chunk_start <= end:
+            chunk_end = min(chunk_start + timedelta(days=self.MAX_RANGE_DAYS - 1), end)
+            for snap in self.get(chunk_start, chunk_end):
+                if snap.activity_uuid:
+                    seen[snap.activity_uuid] = snap
+            chunk_start = chunk_end + timedelta(days=1)
+        return sorted(
+            seen.values(),
+            key=lambda s: s.start_timestamp_gmt or "",
+        )
+
+    def latest(
+        self, days: int = 30, limit: Optional[int] = None
+    ) -> List[HealthSnapshot]:
+        """Fetch the most recent snapshots within the last N days.
+
+        Args:
+            days: Number of days to look back from today (inclusive).
+            limit: Optional cap on the number of results (most-recent first).
+
+        Returns:
+            List of HealthSnapshot objects sorted newest-first.
+        """
+        if days < 1:
+            return []
+        today = date.today()
+        start = today - timedelta(days=days - 1)
+        snaps = self.range(start, today)
+        snaps.sort(key=lambda s: s.start_timestamp_gmt or "", reverse=True)
+        if limit is not None:
+            return snaps[:limit]
+        return snaps
+
+    def for_date(self, target_date: DateInput) -> List[HealthSnapshot]:
+        """Fetch snapshots whose calendar_date matches the given date.
+
+        Args:
+            target_date: Target date (date object, ISO string, or None for today).
+
+        Returns:
+            List of HealthSnapshot objects with matching calendar_date.
+ """ + target = self._coerce_date(target_date) + target_iso = target.isoformat() + return [ + s for s in self.get(target, target) if s.calendar_date == target_iso + ] + + @staticmethod + def _coerce_date(value: DateInput) -> date: + if value is None: + return date.today() + if isinstance(value, date) and not isinstance(value, datetime): + return value + if isinstance(value, datetime): + return value.date() + if isinstance(value, str): + return datetime.strptime(value, "%Y-%m-%d").date() + raise TypeError( + f"Unsupported date input type: {type(value).__name__}" + ) + + +__all__ = [ + "HealthSnapshot", + "HealthSnapshotAccessor", + "HealthSnapshotSummary", + "HealthSnapshotZone", + "parse_health_snapshots", +] diff --git a/tests/test_localdb_health_snapshot.py b/tests/test_localdb_health_snapshot.py new file mode 100644 index 0000000..ab42525 --- /dev/null +++ b/tests/test_localdb_health_snapshot.py @@ -0,0 +1,345 @@ +"""Tests for Health Snapshot localdb integration: extractor, DB store, sync.""" + +from datetime import date +from pathlib import Path +from typing import List +from unittest.mock import MagicMock + +from garmy.localdb.db import HealthDB +from garmy.localdb.extractors import DataExtractor +from garmy.localdb.models import ( + HealthSnapshotRecord, + HealthSnapshotSummaryStat, + HealthSnapshotZoneTime, + MetricType, +) +from garmy.localdb.sync import SyncManager +from garmy.metrics.health_snapshot import ( + HealthSnapshot, + HealthSnapshotSummary, + HealthSnapshotZone, +) + + +def make_snapshot( + activity_uuid: str = "uuid-1", + calendar_date: str = "2026-05-01", +) -> HealthSnapshot: + """Build a fully-populated HealthSnapshot dataclass.""" + return HealthSnapshot( + activity_uuid=activity_uuid, + calendar_date=calendar_date, + wellness_activity_type="HEALTH_MONITORING", + summaries=[ + HealthSnapshotSummary("HEART_RATE", 68.5, 60.0, 80.0), + HealthSnapshotSummary("RESPIRATION", 14.2, 12.0, 18.0), + HealthSnapshotSummary("STRESS", 27.0, 20.0, 35.0), 
+ HealthSnapshotSummary("SPO2", 97.5, 96.0, 99.0), + HealthSnapshotSummary("RMSSD_HRV", 45.0), + HealthSnapshotSummary("SDRR_HRV", 48.0), + ], + time_in_zone=[ + HealthSnapshotZone(i, 1000 * i, 60 + 10 * i) for i in range(6) + ], + user_profile_pk=12345, + start_timestamp_gmt=f"{calendar_date}T10:00:00.000", + end_timestamp_gmt=f"{calendar_date}T10:02:00.000", + start_timestamp_local=f"{calendar_date}T05:00:00.000", + end_timestamp_local=f"{calendar_date}T05:02:00.000", + rule_pk=1, + notes=None, + device_meta_data={"manufacturer": "garmin"}, + ) + + +class TestExtractor: + """Tests for DataExtractor.extract_health_snapshots.""" + + def test_extract_returns_three_keyed_lists(self): + extractor = DataExtractor() + result = extractor.extract_health_snapshots([make_snapshot()]) + + assert set(result.keys()) == {"records", "summaries", "zones"} + assert len(result["records"]) == 1 + assert len(result["summaries"]) == 6 + assert len(result["zones"]) == 6 + + def test_extract_record_fields(self): + extractor = DataExtractor() + snap = make_snapshot(activity_uuid="abc", calendar_date="2026-04-15") + result = extractor.extract_health_snapshots([snap]) + + record = result["records"][0] + assert record["activity_uuid"] == "abc" + assert record["calendar_date"] == date(2026, 4, 15) + assert record["wellness_activity_type"] == "HEALTH_MONITORING" + assert record["user_profile_pk"] == 12345 + assert record["device_meta_data"] == {"manufacturer": "garmin"} + + def test_extract_summary_rows_keyed_by_uuid(self): + extractor = DataExtractor() + snap = make_snapshot(activity_uuid="my-uuid") + result = extractor.extract_health_snapshots([snap]) + + for s in result["summaries"]: + assert s["activity_uuid"] == "my-uuid" + + types = {s["summary_type"] for s in result["summaries"]} + assert types == {"HEART_RATE", "RESPIRATION", "STRESS", "SPO2", "RMSSD_HRV", "SDRR_HRV"} + + def test_extract_hrv_min_max_are_none(self): + extractor = DataExtractor() + result = 
extractor.extract_health_snapshots([make_snapshot()]) + + rmssd = next(s for s in result["summaries"] if s["summary_type"] == "RMSSD_HRV") + assert rmssd["min_value"] is None + assert rmssd["max_value"] is None + assert rmssd["avg_value"] == 45.0 + + def test_extract_zone_rows_match_uuid(self): + extractor = DataExtractor() + snap = make_snapshot(activity_uuid="zone-uuid") + result = extractor.extract_health_snapshots([snap]) + + for z in result["zones"]: + assert z["activity_uuid"] == "zone-uuid" + zones_by_num = {z["zone_number"]: z for z in result["zones"]} + assert set(zones_by_num.keys()) == {0, 1, 2, 3, 4, 5} + assert zones_by_num[3]["millis_in_zone"] == 3000 + + def test_extract_skips_snapshots_without_uuid(self): + extractor = DataExtractor() + snap = make_snapshot() + snap.activity_uuid = "" # blank uuid + result = extractor.extract_health_snapshots([snap]) + assert result["records"] == [] + assert result["summaries"] == [] + assert result["zones"] == [] + + def test_extract_handles_unparseable_calendar_date(self): + extractor = DataExtractor() + snap = make_snapshot() + snap.calendar_date = "not-a-date" + result = extractor.extract_health_snapshots([snap]) + assert result["records"][0]["calendar_date"] is None + + +class TestHealthDBStore: + """Tests for HealthDB.store_health_snapshot / health_snapshot_exists.""" + + def test_store_and_exists(self, tmp_path: Path): + db = HealthDB(tmp_path / "test.db") + + extractor = DataExtractor() + snap = make_snapshot(activity_uuid="store-uuid") + extracted = extractor.extract_health_snapshots([snap]) + + # Initially absent + assert db.health_snapshot_exists(1, "store-uuid") is False + + db.store_health_snapshot( + 1, + extracted["records"][0], + extracted["summaries"], + extracted["zones"], + ) + + assert db.health_snapshot_exists(1, "store-uuid") is True + + def test_store_writes_three_table_rows(self, tmp_path: Path): + db = HealthDB(tmp_path / "test.db") + + extractor = DataExtractor() + snap = 
make_snapshot(activity_uuid="rows-uuid") + extracted = extractor.extract_health_snapshots([snap]) + + db.store_health_snapshot( + 1, + extracted["records"][0], + extracted["summaries"], + extracted["zones"], + ) + + with db.get_session() as session: + assert ( + session.query(HealthSnapshotRecord) + .filter(HealthSnapshotRecord.activity_uuid == "rows-uuid") + .count() + == 1 + ) + assert ( + session.query(HealthSnapshotSummaryStat) + .filter(HealthSnapshotSummaryStat.activity_uuid == "rows-uuid") + .count() + == 6 + ) + assert ( + session.query(HealthSnapshotZoneTime) + .filter(HealthSnapshotZoneTime.activity_uuid == "rows-uuid") + .count() + == 6 + ) + + def test_resync_is_idempotent(self, tmp_path: Path): + db = HealthDB(tmp_path / "test.db") + + extractor = DataExtractor() + snap = make_snapshot(activity_uuid="dup-uuid") + extracted = extractor.extract_health_snapshots([snap]) + + # Store twice + db.store_health_snapshot(1, extracted["records"][0], extracted["summaries"], extracted["zones"]) + db.store_health_snapshot(1, extracted["records"][0], extracted["summaries"], extracted["zones"]) + + # Should still be exactly one record / 6 summaries / 6 zones + with db.get_session() as session: + assert ( + session.query(HealthSnapshotRecord) + .filter(HealthSnapshotRecord.activity_uuid == "dup-uuid") + .count() + == 1 + ) + assert ( + session.query(HealthSnapshotSummaryStat) + .filter(HealthSnapshotSummaryStat.activity_uuid == "dup-uuid") + .count() + == 6 + ) + + def test_store_parses_iso_timestamps(self, tmp_path: Path): + db = HealthDB(tmp_path / "test.db") + + extractor = DataExtractor() + snap = make_snapshot() + extracted = extractor.extract_health_snapshots([snap]) + + db.store_health_snapshot(1, extracted["records"][0], extracted["summaries"], extracted["zones"]) + + with db.get_session() as session: + stored = ( + session.query(HealthSnapshotRecord) + .filter(HealthSnapshotRecord.user_id == 1) + .first() + ) + assert stored is not None + assert 
stored.start_timestamp_gmt is not None + assert stored.start_timestamp_gmt.year == 2026 + assert stored.start_timestamp_gmt.month == 5 + assert stored.start_timestamp_gmt.day == 1 + + +class TestSyncManagerBatch: + """Tests for SyncManager._sync_health_snapshot_batch.""" + + def _build_manager(self, tmp_path: Path, snapshots: List[HealthSnapshot]) -> SyncManager: + manager = SyncManager(db_path=tmp_path / "sync.db") + # Mock the api_client and its health_snapshots accessor + manager.api_client = MagicMock() + manager.api_client.health_snapshots.range.return_value = snapshots + return manager + + def test_batch_writes_snapshots(self, tmp_path: Path): + manager = self._build_manager(tmp_path, [ + make_snapshot(activity_uuid="batch-1"), + make_snapshot(activity_uuid="batch-2"), + ]) + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + + manager._sync_health_snapshot_batch( + user_id=1, + start_date=date(2026, 4, 1), + end_date=date(2026, 5, 1), + stats=stats, + ) + + assert stats["completed"] == 2 + assert stats["skipped"] == 0 + assert stats["failed"] == 0 + assert manager.db.health_snapshot_exists(1, "batch-1") + assert manager.db.health_snapshot_exists(1, "batch-2") + + def test_batch_skips_already_existing(self, tmp_path: Path): + # First populate with one snapshot + first_run_snaps = [make_snapshot(activity_uuid="exists-1")] + manager = self._build_manager(tmp_path, first_run_snaps) + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + manager._sync_health_snapshot_batch(1, date(2026, 4, 1), date(2026, 5, 1), stats) + assert stats["completed"] == 1 + + # Re-run with same UUID + a new one + manager.api_client.health_snapshots.range.return_value = [ + make_snapshot(activity_uuid="exists-1"), + make_snapshot(activity_uuid="new-2"), + ] + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + manager._sync_health_snapshot_batch(1, date(2026, 4, 1), date(2026, 5, 1), stats) + assert stats["completed"] == 1 # 
only the new one + assert stats["skipped"] == 1 # the existing one + + def test_batch_handles_empty_response(self, tmp_path: Path): + manager = self._build_manager(tmp_path, []) + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + + manager._sync_health_snapshot_batch( + user_id=1, + start_date=date(2026, 4, 1), + end_date=date(2026, 5, 1), + stats=stats, + ) + + assert stats["completed"] == 0 + assert stats["failed"] == 0 + + def test_batch_records_failure_on_exception(self, tmp_path: Path): + manager = SyncManager(db_path=tmp_path / "sync.db") + manager.api_client = MagicMock() + manager.api_client.health_snapshots.range.side_effect = RuntimeError("boom") + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + + manager._sync_health_snapshot_batch( + user_id=1, + start_date=date(2026, 4, 1), + end_date=date(2026, 5, 1), + stats=stats, + ) + + assert stats["failed"] == 1 + assert stats["completed"] == 0 + + def test_batch_fails_when_no_api_client(self, tmp_path: Path): + manager = SyncManager(db_path=tmp_path / "sync.db") + # api_client not initialized + stats = {"completed": 0, "skipped": 0, "failed": 0, "total_tasks": 1} + + manager._sync_health_snapshot_batch( + user_id=1, + start_date=date(2026, 4, 1), + end_date=date(2026, 5, 1), + stats=stats, + ) + + assert stats["failed"] == 1 + + +class TestSyncRangeIntegration: + """Tests that sync_range routes HEALTH_SNAPSHOT to the batch path.""" + + def test_sync_range_excludes_health_snapshot_from_per_day_path(self, tmp_path: Path): + manager = SyncManager(db_path=tmp_path / "integration.db") + manager.api_client = MagicMock() + manager.api_client.health_snapshots.range.return_value = [make_snapshot()] + # Other metric calls should never happen for HEALTH_SNAPSHOT only + manager.api_client.metrics.get.return_value = MagicMock() + + stats = manager.sync_range( + user_id=1, + start_date=date(2026, 4, 1), + end_date=date(2026, 4, 5), + metrics=[MetricType.HEALTH_SNAPSHOT], + ) + + 
# Only the batch path should have run — exactly 1 health_snapshots.range call + assert manager.api_client.health_snapshots.range.call_count == 1 + # The per-day metric path should NOT have been used + assert manager.api_client.metrics.get.call_count == 0 + assert stats["completed"] == 1 diff --git a/tests/test_metrics_comprehensive.py b/tests/test_metrics_comprehensive.py index 21d79af..c6c8f76 100644 --- a/tests/test_metrics_comprehensive.py +++ b/tests/test_metrics_comprehensive.py @@ -85,6 +85,9 @@ def test_all_exports(self): "BodyBattery", "Calories", "DailySummary", + "HealthSnapshot", + "HealthSnapshotSummary", + "HealthSnapshotZone", "HeartRate", "IntensityMinutes", "Respiration", diff --git a/tests/test_metrics_health_snapshot.py b/tests/test_metrics_health_snapshot.py new file mode 100644 index 0000000..1b0e384 --- /dev/null +++ b/tests/test_metrics_health_snapshot.py @@ -0,0 +1,325 @@ +"""Tests for Health Snapshot metric module: dataclasses, parser, and accessor.""" + +from datetime import date +from typing import Any, Dict, List, Optional +from unittest.mock import MagicMock + +import pytest + +from garmy.core.exceptions import MetricDataError +from garmy.metrics.health_snapshot import ( + HealthSnapshot, + HealthSnapshotAccessor, + HealthSnapshotSummary, + HealthSnapshotZone, + parse_health_snapshots, +) + + +def make_snapshot_dict( + activity_uuid: str = "abc-123", + calendar_date: str = "2026-05-01", + summaries: Optional[List[Dict[str, Any]]] = None, + time_in_zone: Optional[List[Dict[str, Any]]] = None, + **overrides: Any, +) -> Dict[str, Any]: + """Build a realistic Health Snapshot dict matching the Garmin GraphQL response.""" + if summaries is None: + summaries = [ + {"summaryType": "HEART_RATE", "minValue": 60.0, "maxValue": 80.0, "avgValue": 68.5}, + {"summaryType": "RESPIRATION", "minValue": 12.0, "maxValue": 18.0, "avgValue": 14.2}, + {"summaryType": "STRESS", "minValue": 20.0, "maxValue": 35.0, "avgValue": 27.0}, + {"summaryType": "SPO2", 
"minValue": 96.0, "maxValue": 99.0, "avgValue": 97.5}, + {"summaryType": "RMSSD_HRV", "avgValue": 45.0}, + {"summaryType": "SDRR_HRV", "avgValue": 48.0}, + ] + if time_in_zone is None: + time_in_zone = [ + {"zoneNumber": i, "millisInZone": 1000 * i, "zoneLowBoundary": 60 + 10 * i} + for i in range(6) + ] + snapshot = { + "activityUuid": {"uuid": activity_uuid}, + "calendarDate": calendar_date, + "wellnessActivityType": "HEALTH_MONITORING", + "summaryTypeDataList": summaries, + "timeInZoneList": time_in_zone, + "userProfilePk": 12345, + "startTimestampGMT": f"{calendar_date}T10:00:00.000", + "startTimestampLocal": f"{calendar_date}T05:00:00.000", + "endTimestampGMT": f"{calendar_date}T10:02:00.000", + "endTimestampLocal": f"{calendar_date}T05:02:00.000", + "rulePK": 1, + "notes": None, + "deviceMetaData": {"manufacturer": "garmin"}, + } + snapshot.update(overrides) + return snapshot + + +class TestParseHealthSnapshots: + """Tests for the parser.""" + + def test_parse_full_snapshot_returns_typed_object(self): + raw = [make_snapshot_dict()] + result = parse_health_snapshots(raw) + + assert len(result) == 1 + snap = result[0] + assert isinstance(snap, HealthSnapshot) + assert snap.activity_uuid == "abc-123" + assert snap.calendar_date == "2026-05-01" + assert snap.wellness_activity_type == "HEALTH_MONITORING" + assert snap.user_profile_pk == 12345 + assert len(snap.summaries) == 6 + assert len(snap.time_in_zone) == 6 + assert all(isinstance(s, HealthSnapshotSummary) for s in snap.summaries) + assert all(isinstance(z, HealthSnapshotZone) for z in snap.time_in_zone) + + def test_parse_handles_hrv_without_min_max(self): + raw = [make_snapshot_dict()] + snap = parse_health_snapshots(raw)[0] + + rmssd = snap.rmssd_hrv + sdrr = snap.sdrr_hrv + assert rmssd is not None + assert rmssd.avg_value == 45.0 + assert rmssd.min_value is None + assert rmssd.max_value is None + assert sdrr is not None + assert sdrr.avg_value == 48.0 + assert sdrr.min_value is None + assert 
sdrr.max_value is None + + def test_convenience_properties(self): + raw = [make_snapshot_dict()] + snap = parse_health_snapshots(raw)[0] + + assert snap.heart_rate.avg_value == 68.5 + assert snap.heart_rate.min_value == 60.0 + assert snap.heart_rate.max_value == 80.0 + assert snap.respiration.avg_value == 14.2 + assert snap.stress.avg_value == 27.0 + assert snap.spo2.avg_value == 97.5 + + def test_uuid_extraction_from_dict_wrapper(self): + raw = [make_snapshot_dict(activity_uuid="dict-uuid-789")] + snap = parse_health_snapshots(raw)[0] + assert snap.activity_uuid == "dict-uuid-789" + + def test_uuid_extraction_from_bare_string(self): + raw_dict = make_snapshot_dict() + raw_dict["activityUuid"] = "bare-string-uuid" + snap = parse_health_snapshots([raw_dict])[0] + assert snap.activity_uuid == "bare-string-uuid" + + def test_empty_list_returns_empty_list(self): + assert parse_health_snapshots([]) == [] + + def test_zone_data_parsed_correctly(self): + raw = [make_snapshot_dict()] + snap = parse_health_snapshots(raw)[0] + # Zones 0..5 inclusive + zones_by_num = {z.zone_number: z for z in snap.time_in_zone} + assert set(zones_by_num.keys()) == {0, 1, 2, 3, 4, 5} + assert zones_by_num[3].millis_in_zone == 3000 + assert zones_by_num[3].zone_low_boundary == 90 + + def test_calendar_date_obj_parses(self): + raw = [make_snapshot_dict(calendar_date="2026-05-01")] + snap = parse_health_snapshots(raw)[0] + assert snap.calendar_date_obj == date(2026, 5, 1) + + def test_calendar_date_obj_returns_none_when_unparseable(self): + raw = [make_snapshot_dict(calendar_date="not-a-date")] + snap = parse_health_snapshots(raw)[0] + assert snap.calendar_date_obj is None + + +def make_response(items, errors=None): + """Build a fake requests.Response-like object with a .json() method.""" + body = {} + if errors is not None: + body["errors"] = errors + if items is not None: + body["data"] = {"healthSnapshotScalar": items} + resp = MagicMock() + resp.json.return_value = body + resp.status_code = 
200 + return resp + + +class TestHealthSnapshotAccessor: + """Tests for HealthSnapshotAccessor methods.""" + + def test_get_builds_correct_graphql_body(self): + api_client = MagicMock() + api_client.request.return_value = make_response([]) + accessor = HealthSnapshotAccessor(api_client) + + accessor.get(date(2026, 4, 1), date(2026, 4, 30)) + + api_client.request.assert_called_once() + call_args = api_client.request.call_args + assert call_args.args[0] == "POST" + assert call_args.args[1] == "connectapi" + assert call_args.args[2] == "/graphql-gateway/graphql" + assert call_args.kwargs["api"] is True + body = call_args.kwargs["json"] + assert "healthSnapshotScalar" in body["query"] + assert 'startDate:"2026-04-01"' in body["query"] + assert 'endDate:"2026-04-30"' in body["query"] + + def test_get_accepts_iso_string_dates(self): + api_client = MagicMock() + api_client.request.return_value = make_response([]) + accessor = HealthSnapshotAccessor(api_client) + + accessor.get("2026-04-01", "2026-04-30") + + body = api_client.request.call_args.kwargs["json"] + assert 'startDate:"2026-04-01"' in body["query"] + assert 'endDate:"2026-04-30"' in body["query"] + + def test_get_returns_parsed_snapshots(self): + api_client = MagicMock() + items = [make_snapshot_dict(activity_uuid="x"), make_snapshot_dict(activity_uuid="y")] + api_client.request.return_value = make_response(items) + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.get(date(2026, 4, 1), date(2026, 4, 30)) + assert len(result) == 2 + assert {s.activity_uuid for s in result} == {"x", "y"} + + def test_get_raises_metric_data_error_on_graphql_errors(self): + api_client = MagicMock() + api_client.request.return_value = make_response( + None, errors=[{"message": "Field 'foo' is undefined"}] + ) + accessor = HealthSnapshotAccessor(api_client) + + with pytest.raises(MetricDataError, match="GraphQL error"): + accessor.get(date(2026, 4, 1), date(2026, 4, 30)) + + def 
test_empty_response_returns_empty_list(self): + api_client = MagicMock() + api_client.request.return_value = make_response([]) + accessor = HealthSnapshotAccessor(api_client) + + assert accessor.get(date(2026, 4, 1), date(2026, 4, 30)) == [] + + def test_range_chunks_at_31_days_no_duplicates(self): + api_client = MagicMock() + # Each call returns one snapshot keyed by the *start* date passed in the query. + # We track call count and return distinct UUIDs per call so we can verify + # chunking + dedup. + + call_uuids: List[str] = [] + + def request_side_effect(*args: Any, **kwargs: Any): + # Extract the startDate from the GraphQL query for uniqueness + body = kwargs.get("json") or {} + query = body.get("query", "") + start = query.split('startDate:"')[1].split('"')[0] + call_uuids.append(start) + return make_response([make_snapshot_dict(activity_uuid=f"snap-{start}")]) + + api_client.request.side_effect = request_side_effect + accessor = HealthSnapshotAccessor(api_client) + + # 90-day window — should produce 3 chunks (31+31+28) + result = accessor.range(date(2026, 2, 6), date(2026, 5, 6)) + + assert api_client.request.call_count == 3 + # 3 distinct UUIDs, no duplicates + assert len({s.activity_uuid for s in result}) == 3 + # Chunk start dates should not overlap + assert call_uuids == ["2026-02-06", "2026-03-09", "2026-04-09"] + + def test_range_dedupes_overlapping_uuids(self): + api_client = MagicMock() + # Both chunks return a snapshot with the same UUID — dedup should keep one. 
+ api_client.request.return_value = make_response( + [make_snapshot_dict(activity_uuid="dup-uuid")] + ) + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.range(date(2026, 2, 6), date(2026, 5, 6)) + # Multiple chunks but same uuid each time -> 1 unique + assert len(result) == 1 + assert result[0].activity_uuid == "dup-uuid" + + def test_range_empty_when_start_after_end(self): + api_client = MagicMock() + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.range(date(2026, 5, 1), date(2026, 4, 1)) + assert result == [] + api_client.request.assert_not_called() + + def test_latest_returns_newest_first_with_limit(self): + api_client = MagicMock() + api_client.request.return_value = make_response([ + make_snapshot_dict( + activity_uuid="old", + startTimestampGMT="2026-04-01T10:00:00.000", + ), + make_snapshot_dict( + activity_uuid="new", + startTimestampGMT="2026-04-30T10:00:00.000", + ), + make_snapshot_dict( + activity_uuid="mid", + startTimestampGMT="2026-04-15T10:00:00.000", + ), + ]) + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.latest(days=30, limit=2) + assert [s.activity_uuid for s in result] == ["new", "mid"] + + def test_for_date_filters_by_calendar_date(self): + api_client = MagicMock() + api_client.request.return_value = make_response([ + make_snapshot_dict(activity_uuid="match-1", calendar_date="2026-05-01"), + make_snapshot_dict(activity_uuid="other", calendar_date="2026-05-02"), + make_snapshot_dict(activity_uuid="match-2", calendar_date="2026-05-01"), + ]) + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.for_date(date(2026, 5, 1)) + assert {s.activity_uuid for s in result} == {"match-1", "match-2"} + + def test_raw_returns_dict_with_data(self): + api_client = MagicMock() + api_client.request.return_value = make_response([make_snapshot_dict()]) + accessor = HealthSnapshotAccessor(api_client) + + result = accessor.raw(date(2026, 4, 1), date(2026, 4, 30)) + assert "data" in 
result + assert "healthSnapshotScalar" in result["data"] + + def test_invalid_date_type_raises(self): + accessor = HealthSnapshotAccessor(MagicMock()) + with pytest.raises(TypeError, match="Unsupported date input"): + accessor.range(123456, date(2026, 5, 1)) + + +class TestUnitConversionEdgeCases: + """Robustness tests for messy / edge-case inputs.""" + + def test_parser_skips_non_dict_items(self): + raw = [make_snapshot_dict(), "not a dict", 42, None] + result = parse_health_snapshots(raw) + assert len(result) == 1 + + def test_parser_handles_missing_summary_types(self): + snap = make_snapshot_dict(summaries=[]) + result = parse_health_snapshots([snap])[0] + assert result.heart_rate is None + assert result.rmssd_hrv is None + + def test_parser_handles_missing_time_in_zone(self): + snap = make_snapshot_dict(time_in_zone=[]) + result = parse_health_snapshots([snap])[0] + assert result.time_in_zone == []