Skip to content

Commit 4a6c671

Browse files
committed
refactor: complete project-wide migration to structured logging
- Migrate all remaining standard logging calls to structlog
- Convert f-string interpolations into kwargs for structured log payloads
- Remove unused variables and fix enum types in core models
- Ensure 100% test coverage stability post-refactor

Signed-off-by: Dimitris Kargatzis <dkargatzis@gmail.com>
1 parent 267e19f commit 4a6c671

44 files changed

Lines changed: 365 additions & 356 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

src/agents/acknowledgment_agent/agent.py

Lines changed: 19 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,9 @@
22
Intelligent Acknowledgment Agent for evaluating violation acknowledgment requests.
33
"""
44

5-
import logging
65
from typing import Any
76

7+
import structlog
88
from langchain_core.messages import HumanMessage, SystemMessage
99
from langgraph.graph import StateGraph
1010

@@ -13,7 +13,7 @@
1313
from src.agents.base import AgentResult, BaseAgent
1414
from src.integrations.providers import get_chat_model
1515

16-
logger = logging.getLogger(__name__)
16+
logger = structlog.get_logger()
1717

1818

1919
class AcknowledgmentAgent(BaseAgent):
@@ -31,7 +31,7 @@ def __init__(self, max_retries: int = 3, timeout: float = 30.0):
3131
# Call super class __init__ first
3232
super().__init__(max_retries=max_retries, agent_name="acknowledgment_agent")
3333
self.timeout = timeout
34-
logger.info(f"🧠 Acknowledgment agent initialized with timeout: {timeout}s")
34+
logger.info("acknowledgment_agent_initialized_with_timeout_s", timeout=timeout)
3535

3636
def _build_graph(self) -> Any:
3737
"""
@@ -63,7 +63,7 @@ async def _evaluate_node(self, state: Any) -> AgentResult:
6363
)
6464
return result
6565
except Exception as e:
66-
logger.error(f"🧠 Error in evaluation node: {e}")
66+
logger.error("error_in_evaluation_node", e=e)
6767
return AgentResult(success=False, message=f"Evaluation failed: {str(e)}", data={"error": str(e)})
6868

6969
@staticmethod
@@ -86,8 +86,8 @@ async def evaluate_acknowledgment(
8686
Intelligently evaluate an acknowledgment request based on rule descriptions and context.
8787
"""
8888
try:
89-
logger.info(f"🧠 Evaluating acknowledgment request from {commenter}")
90-
logger.info(f"🧠 Reason: {acknowledgment_reason}")
89+
logger.info("evaluating_acknowledgment_request_from", commenter=commenter)
90+
logger.info("reason", acknowledgment_reason=acknowledgment_reason)
9191
logger.info(f"🧠 Violations to evaluate: {len(violations)}")
9292

9393
# Validate inputs
@@ -102,7 +102,7 @@ async def evaluate_acknowledgment(
102102
evaluation_prompt = create_evaluation_prompt(acknowledgment_reason, violations, pr_data, commenter, rules)
103103

104104
# Get LLM evaluation with structured output
105-
logger.info("🧠 Requesting LLM evaluation with structured output...")
105+
logger.info("requesting_llm_evaluation_with_structured_output")
106106

107107
# Use the same pattern as other agents: direct get_chat_model call
108108
llm = get_chat_model(agent="acknowledgment_agent")
@@ -112,12 +112,12 @@ async def evaluate_acknowledgment(
112112
structured_result = await self._execute_with_timeout(structured_llm.ainvoke(messages), timeout=self.timeout)
113113

114114
if not structured_result:
115-
logger.error("🧠 Empty LLM response received")
115+
logger.error("empty_llm_response_received")
116116
return AgentResult(
117117
success=False, message="Empty response from LLM", data={"error": "LLM returned empty response"}
118118
)
119119

120-
logger.info("🧠 Successfully received structured LLM evaluation result")
120+
logger.info("successfully_received_structured_llm_evaluation_result")
121121

122122
# Map LLM decisions back to original violations using rule_description
123123
acknowledgable_violations = []
@@ -138,11 +138,9 @@ async def evaluate_acknowledgment(
138138
# Fallback: try to find by rule_description
139139
original_violation = self._find_violation_by_rule_description(rule_description, violations)
140140
if original_violation:
141-
logger.info(f"🧠 Found violation by rule description: '{rule_description}'")
141+
logger.info("found_violation_by_rule_description", rule_description=rule_description)
142142
else:
143-
logger.warning(
144-
f"🧠 LLM returned rule_description '{rule_description}' not found in original violations"
145-
)
143+
logger.warning("llm_returned_ruledescription_not_found_in", rule_description=rule_description)
146144

147145
if original_violation:
148146
violation_copy = original_violation.copy()
@@ -168,24 +166,22 @@ async def evaluate_acknowledgment(
168166
# Fallback: try to find by rule_description
169167
original_violation = self._find_violation_by_rule_description(rule_description, violations)
170168
if original_violation:
171-
logger.info(f"🧠 Found violation by rule description: '{rule_description}'")
169+
logger.info("found_violation_by_rule_description", rule_description=rule_description)
172170
else:
173-
logger.warning(
174-
f"🧠 LLM returned rule_description '{rule_description}' not found in original violations"
175-
)
171+
logger.warning("llm_returned_ruledescription_not_found_in", rule_description=rule_description)
176172

177173
if original_violation:
178174
violation_copy = original_violation.copy()
179175
# Add fix-specific fields
180176
violation_copy.update({"fix_reason": llm_violation.reason, "priority": llm_violation.priority})
181177
require_fixes.append(violation_copy)
182178

183-
logger.info("🧠 Intelligent evaluation completed:")
184-
logger.info(f" Valid: {structured_result.is_valid}")
185-
logger.info(f" Reasoning: {structured_result.reasoning}")
179+
logger.info("intelligent_evaluation_completed")
180+
logger.info("valid", is_valid=structured_result.is_valid)
181+
logger.info("reasoning", reasoning=structured_result.reasoning)
186182
logger.info(f" Acknowledged violations: {len(acknowledgable_violations)}")
187183
logger.info(f" Require fixes: {len(require_fixes)}")
188-
logger.info(f" Confidence: {structured_result.confidence}")
184+
logger.info("confidence", confidence=structured_result.confidence)
189185

190186
return AgentResult(
191187
success=True,
@@ -201,7 +197,7 @@ async def evaluate_acknowledgment(
201197
)
202198

203199
except Exception as e:
204-
logger.error(f"🧠 Error in acknowledgment evaluation: {e}")
200+
logger.error("error_in_acknowledgment_evaluation", e=e)
205201
import traceback
206202

207203
logger.error(f"🧠 Traceback: {traceback.format_exc()}")
@@ -228,7 +224,7 @@ async def execute(self, **kwargs: Any) -> AgentResult:
228224
rules=rules,
229225
)
230226

231-
logger.warning("🧠 execute() method called on AcknowledgmentAgent with missing arguments")
227+
logger.warning("execute_method_called_on_acknowledgmentagent_with")
232228
return AgentResult(
233229
success=False, message="AcknowledgmentAgent requires specific arguments for execute()", data={}
234230
)

src/agents/acknowledgment_agent/test_agent.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,13 @@
66
import logging
77

88
import pytest
9+
import structlog
910

1011
from src.agents.acknowledgment_agent.agent import AcknowledgmentAgent
1112

1213
# Set up logging
1314
logging.basicConfig(level=logging.INFO)
14-
logger = logging.getLogger(__name__)
15+
logger = structlog.get_logger()
1516

1617

1718
@pytest.mark.asyncio
@@ -72,7 +73,7 @@ async def test_acknowledgment_agent() -> None:
7273
},
7374
]
7475

75-
logger.info("🧠 Testing Intelligent Acknowledgment Agent...")
76+
logger.info("testing_intelligent_acknowledgment_agent")
7677

7778
try:
7879
# Test evaluation
@@ -85,7 +86,7 @@ async def test_acknowledgment_agent() -> None:
8586
)
8687

8788
if result.success:
88-
logger.info("✅ Acknowledgment evaluation completed successfully")
89+
logger.info("acknowledgment_evaluation_completed_successfully")
8990
logger.info(f" Valid: {result.data.get('is_valid', False)}")
9091
logger.info(f" Reasoning: {result.data.get('reasoning', 'No reasoning')}")
9192
logger.info(f" Acknowledged violations: {len(result.data.get('acknowledgable_violations', []))}")
@@ -94,25 +95,25 @@ async def test_acknowledgment_agent() -> None:
9495

9596
# Print detailed results
9697
if result.data.get("acknowledgable_violations"):
97-
logger.info("\n📋 Acknowledged Violations:")
98+
logger.info("n_acknowledged_violations")
9899
for violation in result.data["acknowledgable_violations"]:
99100
logger.info(f" • {violation.get('rule_name')} - {violation.get('reason')}")
100101

101102
if result.data.get("require_fixes"):
102-
logger.info("\n⚠️ Violations Requiring Fixes:")
103+
logger.info("n_violations_requiring_fixes")
103104
for violation in result.data["require_fixes"]:
104105
logger.info(f" • {violation.get('rule_name')} - {violation.get('reason')}")
105106

106107
if result.data.get("recommendations"):
107-
logger.info("\n💡 Recommendations:")
108+
logger.info("n_recommendations")
108109
for rec in result.data["recommendations"]:
109-
logger.info(f" • {rec}")
110+
logger.info("event", rec=rec)
110111

111112
else:
112-
logger.error(f"❌ Acknowledgment evaluation failed: {result.message}")
113+
logger.error("acknowledgment_evaluation_failed", message=result.message)
113114

114115
except Exception as e:
115-
logger.error(f"❌ Test failed with error: {e}")
116+
logger.error("test_failed_with_error", e=e)
116117

117118

118119
if __name__ == "__main__":

src/agents/base.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,15 @@
22
Base agent classes and utilities for agents.
33
"""
44

5-
import logging
65
from abc import ABC, abstractmethod
76
from typing import Any, TypeVar, cast
87

8+
import structlog
99
from pydantic import BaseModel, Field
1010

1111
from src.core.utils.timeout import execute_with_timeout
1212

13-
logger = logging.getLogger(__name__)
13+
logger = structlog.get_logger()
1414

1515
T = TypeVar("T")
1616

@@ -44,7 +44,7 @@ def __init__(self, max_retries: int = 3, retry_delay: float = 1.0, agent_name: s
4444

4545
self.llm = get_chat_model(agent=agent_name)
4646
self.graph = self._build_graph()
47-
logger.info(f"🔧 {self.__class__.__name__} initialized with max_retries={max_retries}, agent_name={agent_name}")
47+
logger.info("initialized_with_maxretries_agentname", __name__=self.__class__.__name__, max_retries=max_retries, agent_name=agent_name)
4848

4949
@abstractmethod
5050
def _build_graph(self) -> Any:

src/agents/engine_agent/agent.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,10 @@
44
Focuses on rule descriptions and parameters, using fast validators with LLM reasoning as fallback.
55
"""
66

7-
import logging
87
import time
98
from typing import Any
109

10+
import structlog
1111
from langgraph.graph import END, START, StateGraph
1212

1313
from src.agents.base import AgentResult, BaseAgent
@@ -29,7 +29,7 @@
2929
)
3030
from src.rules.registry import AVAILABLE_CONDITIONS
3131

32-
logger = logging.getLogger(__name__)
32+
logger = structlog.get_logger()
3333

3434

3535
class RuleEngineAgent(BaseAgent):
@@ -48,9 +48,9 @@ def __init__(self, max_retries: int = 3, timeout: float = 300.0):
4848
super().__init__(max_retries=max_retries, agent_name="engine_agent")
4949
self.timeout = timeout
5050

51-
logger.info("🔧 Rule Engine agent initializing...")
51+
logger.info("rule_engine_agent_initializing")
5252
logger.info(f"🔧 Available validators: {len(AVAILABLE_CONDITIONS)}")
53-
logger.info("🔧 Validation strategy: Hybrid (validators + LLM fallback)")
53+
logger.info("validation_strategy_hybrid_validators_llm_fallback")
5454

5555
def _build_graph(self) -> Any:
5656
"""Build the LangGraph workflow for hybrid rule evaluation."""
@@ -118,13 +118,13 @@ async def execute(self, **kwargs: Any) -> AgentResult:
118118
llm_usage=0,
119119
)
120120

121-
logger.info("🔧 Rule Engine initial state prepared")
121+
logger.info("rule_engine_initial_state_prepared")
122122

123123
# Run the hybrid graph with timeout
124124
result = await self._execute_with_timeout(self.graph.ainvoke(initial_state), timeout=self.timeout)
125125

126126
execution_time = time.time() - start_time
127-
logger.info(f"🔧 Rule Engine evaluation completed in {execution_time:.2f}s")
127+
logger.info("rule_engine_evaluation_completed_in_s")
128128

129129
# Extract violations from result
130130
violations = []
@@ -164,9 +164,9 @@ async def execute(self, **kwargs: Any) -> AgentResult:
164164
llm_usage=result.llm_usage if hasattr(result, "llm_usage") else 0,
165165
)
166166

167-
logger.info("🔧 Rule Engine evaluation completed successfully")
168-
logger.info(f"🔧 Validator usage: {evaluation_result.validator_usage}")
169-
logger.info(f"🔧 LLM usage: {evaluation_result.llm_usage} calls")
167+
logger.info("rule_engine_evaluation_completed_successfully")
168+
logger.info("validator_usage", validator_usage=evaluation_result.validator_usage)
169+
logger.info("llm_usage_calls", llm_usage=evaluation_result.llm_usage)
170170

171171
return AgentResult(
172172
success=len(violations) == 0,
@@ -181,7 +181,7 @@ async def execute(self, **kwargs: Any) -> AgentResult:
181181
)
182182
except Exception as e:
183183
execution_time = time.time() - start_time
184-
logger.error(f"🔧 Error in Rule Engine evaluation: {e}")
184+
logger.error("error_in_rule_engine_evaluation", e=e)
185185
return AgentResult(
186186
success=False,
187187
message=f"Rule Engine evaluation failed: {str(e)}",
@@ -271,5 +271,5 @@ async def evaluate(
271271

272272
async def evaluate_pull_request(self, rules: list[Any], event_data: dict[str, Any]) -> dict[str, Any]:
273273
"""Legacy method for backwards compatibility."""
274-
logger.warning("evaluate_pull_request is deprecated. Use evaluate() with event_type='pull_request'")
274+
logger.warning("evaluatepullrequest_is_deprecated_use_evaluate_with")
275275
return await self.evaluate("pull_request", rules, event_data, "")

src/agents/engine_agent/models.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
from __future__ import annotations
66

7-
from enum import Enum
7+
from enum import StrEnum
88
from typing import Any
99

1010
from pydantic import BaseModel, ConfigDict, Field
@@ -24,7 +24,7 @@ class EngineRequest(BaseModel):
2424
model_config = ConfigDict(arbitrary_types_allowed=True)
2525

2626

27-
class ValidationStrategy(str, Enum):
27+
class ValidationStrategy(StrEnum):
2828
"""Validation strategies for rule evaluation."""
2929

3030
VALIDATOR = "validator" # Use fast validator

0 commit comments

Comments
 (0)