"""
Dual-timescale adapters and episodic memory modules
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class DualTimescaleAdapter(nn.Module):
    """Fast and slow adapters for temporal features."""

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int,
                 timescale: str = 'fast', num_layers: int = 2):
        super().__init__()
        self.timescale = timescale
        # LSTM for temporal processing
        self.adapter = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=0.1 if num_layers > 1 else 0
        )
        # Projection layer
        self.projection = nn.Linear(hidden_dim, output_dim)
        # Layer normalization
        self.layer_norm = nn.LayerNorm(output_dim)

    def forward(self, x, hidden=None):
        """
        Process a temporal sequence according to this adapter's timescale.

        Args:
            x: Input tensor of shape (batch, seq_len, features)
            hidden: Optional initial hidden state for the LSTM

        Returns:
            Processed temporal features of shape (batch, output_dim).
            Note that the final LSTM hidden state is not returned.
        """
        out, hidden = self.adapter(x, hidden)
        # Different aggregation strategies for fast vs. slow:
        if self.timescale == 'fast':
            # Fast adapter: take the last timestep (most recent information)
            out = out[:, -1, :]
        else:
            # Slow adapter: mean-pool over the sequence (long-horizon summary)
            out = out.mean(dim=1)
        out = self.projection(out)
        out = self.layer_norm(out)
        return out
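

# Minimal usage sketch (illustration only, not part of the module API).
# All dimensions below are hypothetical placeholders, not values from any
# original training configuration.
def _demo_dual_timescale_adapter():
    fast = DualTimescaleAdapter(input_dim=32, hidden_dim=64, output_dim=16,
                                timescale='fast')
    slow = DualTimescaleAdapter(input_dim=32, hidden_dim=64, output_dim=16,
                                timescale='slow')
    x = torch.randn(4, 10, 32)  # (batch=4, seq_len=10, features=32)
    # Both adapters map the sequence to a fixed-size summary vector
    m_fast, m_slow = fast(x), slow(x)
    assert m_fast.shape == m_slow.shape == (4, 16)
    return m_fast, m_slow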


class AdaptiveMixing(nn.Module):
    """Module for adaptively mixing fast and slow memory features."""

    def __init__(self, z_dim: int, hidden_dim: int):
        super().__init__()
        self.mixing_network = nn.Sequential(
            nn.Linear(z_dim * 2, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, z_c, z_t, m_fast, m_slow):
        """
        Compute an adaptive mixing weight and combine memory features.

        Args:
            z_c: Context encoding of shape (batch, z_dim)
            z_t: Time encoding of shape (batch, z_dim)
            m_fast: Fast memory features
            m_slow: Slow memory features

        Returns:
            Tuple of (mixed memory features, mixing weight alpha), where
            alpha has shape (batch, 1).
        """
        # Compute the mixing weight from the concatenated context and time encodings
        alpha_input = torch.cat([z_c, z_t], dim=-1)
        alpha = self.mixing_network(alpha_input)
        # Convex combination of fast and slow memory features
        mixed = alpha * m_fast + (1 - alpha) * m_slow
        return mixed, alpha
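

# Minimal usage sketch (illustration only). Dimensions are hypothetical and
# assume z_c and z_t share the same dimensionality, matching the z_dim * 2
# input size of the mixing network above.
def _demo_adaptive_mixing():
    mixer = AdaptiveMixing(z_dim=16, hidden_dim=32)
    z_c, z_t = torch.randn(4, 16), torch.randn(4, 16)
    m_fast, m_slow = torch.randn(4, 16), torch.randn(4, 16)
    mixed, alpha = mixer(z_c, z_t, m_fast, m_slow)
    # alpha broadcasts over the feature dimension when mixing
    assert mixed.shape == (4, 16) and alpha.shape == (4, 1)
    return mixed, alpha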


class EpisodicMemory(nn.Module):
    """Episodic memory with attention-based retrieval and gated writing."""

    def __init__(self, memory_size: int, d_model: int, num_heads: int = 8):
        super().__init__()
        self.memory_size = memory_size
        self.d_model = d_model
        # Memory banks as buffers: they move with the module across devices
        # but receive no gradients
        self.register_buffer('memory', torch.zeros(memory_size, d_model))
        self.register_buffer('memory_mask', torch.zeros(memory_size, 1))
        self.register_buffer('memory_age', torch.zeros(memory_size, 1))
        # Plain Python ints, so these counters are not saved in the state_dict
        self.current_idx = 0
        self.total_writes = 0
        # Attention mechanism for retrieval
        self.attention = nn.MultiheadAttention(
            d_model,
            num_heads=num_heads,
            dropout=0.1,
            batch_first=True
        )
        # Key and value projections for memory
        self.key_proj = nn.Linear(d_model, d_model)
        self.value_proj = nn.Linear(d_model, d_model)

    def write(self, state, gate):
        """
        Write to memory with a gating mechanism.

        Entries are stored in a circular buffer: once the memory is full,
        the oldest slots are overwritten.

        Args:
            state: State tensor of shape (batch, d_model)
            gate: Write gate values in [0, 1], one per batch element
        """
        batch_size = state.shape[0]
        for i in range(batch_size):
            if gate[i] > 0.5:  # Hard write threshold
                # Write to the current slot (detached: no gradients flow into memory)
                self.memory[self.current_idx] = state[i].detach()
                self.memory_mask[self.current_idx] = 1
                self.memory_age[self.current_idx] = self.total_writes
                # Advance the circular-buffer index
                self.current_idx = (self.current_idx + 1) % self.memory_size
                self.total_writes += 1

    def retrieve(self, query):
        """
        Retrieve from memory using the attention mechanism.

        Args:
            query: Query tensor of shape (batch, d_model)

        Returns:
            Retrieved memory content of shape (batch, d_model);
            zeros if the memory is empty.
        """
        if self.memory_mask.sum() == 0:
            return torch.zeros_like(query)
        # Select only the slots that have been written to
        valid_mask = self.memory_mask.squeeze() > 0
        valid_memory = self.memory[valid_mask]
        if len(valid_memory) == 0:
            return torch.zeros_like(query)
        # Project memory entries to keys and values
        memory_keys = self.key_proj(valid_memory)
        memory_values = self.value_proj(valid_memory)
        # Expand for batch processing
        batch_size = query.shape[0]
        memory_keys = memory_keys.unsqueeze(0).expand(batch_size, -1, -1)
        memory_values = memory_values.unsqueeze(0).expand(batch_size, -1, -1)
        query_expanded = query.unsqueeze(1)
        # Attend over the valid memory entries
        attended, attention_weights = self.attention(
            query_expanded,
            memory_keys,
            memory_values
        )
        return attended.squeeze(1)

    def get_memory_stats(self):
        """Get statistics about memory usage."""
        num_entries = self.memory_mask.sum().item()
        if num_entries > 0:
            avg_age = self.memory_age[self.memory_mask.squeeze() > 0].mean().item()
        else:
            avg_age = 0
        return {
            'num_entries': num_entries,
            'capacity': self.memory_size,
            'utilization': num_entries / self.memory_size,
            'avg_age': avg_age,
            'total_writes': self.total_writes
        }
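

# Minimal usage sketch (illustration only). Sizes are hypothetical. The
# all-ones gate forces every state to be written, which is just for the demo;
# in training the gate values would come from the WriteGate module below.
def _demo_episodic_memory():
    memory = EpisodicMemory(memory_size=128, d_model=32, num_heads=4)
    states = torch.randn(4, 32)
    memory.write(states, gate=torch.ones(4, 1))
    retrieved = memory.retrieve(torch.randn(4, 32))
    assert retrieved.shape == (4, 32)
    print(memory.get_memory_stats())
    return retrieved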


class WriteGate(nn.Module):
    """Gate network for controlling episodic memory writes."""

    def __init__(self, z_dim: int, memory_dim: int, hidden_dim: int):
        super().__init__()
        # Gate network input: [z, M, epoch_progress]
        input_dim = z_dim + memory_dim + 1
        self.gate_network = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, z, M, epoch_progress):
        """
        Compute the write gate value.

        Args:
            z: Latent representation of shape (batch, z_dim)
            M: Mixed memory features of shape (batch, memory_dim)
            epoch_progress: Scalar training progress in [0, 1]

        Returns:
            Gate values in (0, 1) of shape (batch, 1)
        """
        # Broadcast the scalar progress to one value per batch element
        batch_size = z.shape[0]
        e = torch.full((batch_size, 1), epoch_progress, device=z.device)
        gate_input = torch.cat([z, M, e], dim=-1)
        gate = self.gate_network(gate_input)
        return gate
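

# End-to-end sketch of the intended flow (illustration only; all dimensions
# and the single step below are hypothetical, not the original training setup):
# fast/slow adapters -> adaptive mixing -> gated write -> retrieval.
if __name__ == "__main__":
    d = 16
    fast = DualTimescaleAdapter(input_dim=32, hidden_dim=64, output_dim=d,
                                timescale='fast')
    slow = DualTimescaleAdapter(input_dim=32, hidden_dim=64, output_dim=d,
                                timescale='slow')
    mixer = AdaptiveMixing(z_dim=d, hidden_dim=32)
    gate_net = WriteGate(z_dim=d, memory_dim=d, hidden_dim=32)
    memory = EpisodicMemory(memory_size=128, d_model=d, num_heads=4)

    x = torch.randn(4, 10, 32)           # (batch, seq_len, features)
    m_fast, m_slow = fast(x), slow(x)    # per-timescale sequence summaries
    # z_c and z_t are random stand-ins for the context and time encodings,
    # which this file does not define.
    z_c, z_t = torch.randn(4, d), torch.randn(4, d)
    mixed, alpha = mixer(z_c, z_t, m_fast, m_slow)
    gate = gate_net(z_c, mixed, epoch_progress=0.5)
    memory.write(mixed, gate)            # only entries with gate > 0.5 are stored
    retrieved = memory.retrieve(mixed)
    print('alpha:', alpha.squeeze(-1).tolist())
    print('stats:', memory.get_memory_stats())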