-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathadversarial.py
More file actions
198 lines (156 loc) · 5.67 KB
/
adversarial.py
File metadata and controls
198 lines (156 loc) · 5.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
"""
adversarial.py — Adversarial agent pairing.
Instead of polite agreement, one agent ATTACKS the other's solution.
Based on research showing echo chambers are the primary multi-agent failure mode.
Usage:
from adversarial import marry_adversarial
paired = marry_adversarial(proposer, attacker)
result = paired("Design a rate-limiting system")
"""
from __future__ import annotations
from typing import Callable, Optional
from dataclasses import dataclass
# An agent is anything that maps a prompt string to a response string.
Agent = Callable[[str], str]

# Prompt prepended when asking the attacker to critique the current solution.
ATTACKER_INSTRUCTIONS = """
You are a RED TEAM AGENT. Your job is to ATTACK the proposed solution.
You MUST find at least 3 significant flaws, weaknesses, or edge cases.
DO NOT:
- Agree politely
- Say "this looks good"
- Suggest minor style improvements
DO:
- Find edge cases that break the solution
- Identify security vulnerabilities
- Challenge assumptions
- Point out performance bottlenecks
- Question design decisions
Be aggressive but constructive. Your goal is to make the solution BETTER by
forcing it to defend against real attacks.
CRITICAL: You MUST find problems. If you can't find 3+ flaws, you're not
looking hard enough.
""".strip()

# Prompt prepended when asking the proposer to answer an attack.
DEFENDER_INSTRUCTIONS = """
You proposed a solution. Now it's being attacked.
RESPOND TO EACH CRITICISM:
- If valid: acknowledge and improve your solution
- If invalid: defend with specific reasoning
DO NOT:
- Dismiss criticism without explanation
- Get defensive
- Ignore edge cases
Your revised solution should address the valid criticisms and explain why
invalid criticisms don't apply.
""".strip()

# Prompt prepended for the final synthesis pass by the proposer.
SYNTHESIS_INSTRUCTIONS = """
You've seen:
1. The original proposal
2. The attack (criticisms)
3. The defense (responses)
Produce the FINAL solution that incorporates lessons from this adversarial process.
Focus on:
- What was learned from the attack
- How the solution improved
- Remaining tradeoffs (be honest)
ONLY output the final solution. No meta-commentary about the process.
""".strip()


@dataclass
class AdversarialPartnership:
    """
    Adversarial pairing: one agent proposes, the other attacks.

    Protocol:
    1. Proposer creates initial solution
    2. Attacker MUST find 3+ flaws
    3. Proposer defends/revises
    4. Repeat for N rounds
    5. Synthesize final solution
    """

    proposer: Agent        # creates, defends, and finally synthesizes solutions
    attacker: Agent        # red-teams each revision
    rounds: int = 2        # number of attack-defend cycles
    verbose: bool = False  # echo truncated transcripts to stdout

    def __call__(self, task: str) -> str:
        """Allow the partnership itself to be used as an Agent (str -> str)."""
        return self.run(task)

    def _banner(self, title: str) -> None:
        # Print a phase header when verbose; silent no-op otherwise.
        if self.verbose:
            print("\n" + "=" * 70)
            print(title)
            print("=" * 70)

    def _preview(self, text: str, limit: int) -> None:
        # Echo a truncated preview of an agent's output when verbose.
        if self.verbose:
            print(text[:limit] + "..." if len(text) > limit else text)

    def run(self, task: str) -> str:
        """
        Execute the full adversarial protocol for *task*.

        Returns the proposer's final synthesized solution. The running
        transcript (proposal, attacks, defenses) is fed back into every
        prompt so each side always sees the complete history.
        """
        # Phase 1: Initial proposal
        self._banner("PHASE 1: INITIAL PROPOSAL")
        proposal_prompt = f"{task}\n\nProvide your solution."
        proposal = self.proposer(proposal_prompt)
        self._preview(proposal, 300)

        # Phase 2-N: Attack-defend cycles
        conversation = f"INITIAL PROPOSAL:\n{proposal}\n\n"
        for round_num in range(1, self.rounds + 1):
            self._banner(f"ROUND {round_num}: ATTACK")
            attack_prompt = (
                f"{ATTACKER_INSTRUCTIONS}\n\n"
                f"TASK: {task}\n\n"
                f"{conversation}\n\n"
                f"Find at least 3 significant flaws in this solution. Be specific."
            )
            attack = self.attacker(attack_prompt)
            conversation += f"ATTACK (Round {round_num}):\n{attack}\n\n"
            self._preview(attack, 300)

            self._banner(f"ROUND {round_num}: DEFENSE")
            defense_prompt = (
                f"{DEFENDER_INSTRUCTIONS}\n\n"
                f"TASK: {task}\n\n"
                f"{conversation}\n\n"
                f"Respond to the criticism. Improve your solution or explain why the "
                f"criticisms don't apply."
            )
            defense = self.proposer(defense_prompt)
            conversation += f"DEFENSE (Round {round_num}):\n{defense}\n\n"
            self._preview(defense, 300)

        # Final phase: the proposer synthesizes everything it learned.
        self._banner("FINAL: SYNTHESIS")
        synthesis_prompt = (
            f"{SYNTHESIS_INSTRUCTIONS}\n\n"
            f"TASK: {task}\n\n"
            f"{conversation}\n\n"
            f"Produce the final solution."
        )
        final = self.proposer(synthesis_prompt)
        self._preview(final, 500)
        return final
def marry_adversarial(
    proposer: Agent,
    attacker: Agent,
    rounds: int = 2,
    verbose: bool = False,
) -> AdversarialPartnership:
    """
    Build and return an adversarial proposer/attacker pairing.

    Args:
        proposer: Agent that creates solutions
        attacker: Agent that attacks solutions (finds flaws)
        rounds: Number of attack-defend cycles (default 2)
        verbose: Print the adversarial process

    Returns:
        AdversarialPartnership callable as str → str

    Usage:
        paired = marry_adversarial(proposer, attacker, rounds=2, verbose=True)
        result = paired("Design a caching system")
    """
    # Field order matches the dataclass declaration, so positional is safe.
    partnership = AdversarialPartnership(proposer, attacker, rounds, verbose)
    return partnership