style(memory): apply ruff formatting and linting fixes

Auto-fixed linting errors and formatting issues:
- Removed unused imports (F401): pytest, Any, AnalysisType, MemoryType, OutcomeType
- Removed unused variable (F841): hooks in a test
- Applied consistent formatting across memory service and test files (see the command sketch below)
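
A minimal sketch of the likely workflow, assuming ruff's standard CLI; the exact flags and configuration used for this commit are not recorded here (note that recent ruff versions treat the F841 fix as unsafe):

    # lint and auto-fix violations such as F401/F841 (unused imports/variables)
    ruff check --fix --unsafe-fixes .

    # apply the formatter; source of the line-wrapping changes below
    ruff format .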

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-05 14:07:48 +01:00
parent e3fe0439fd
commit cf6291ac8e
17 changed files with 236 additions and 185 deletions


@@ -344,7 +344,12 @@ class BudgetAllocator:
             Rebalanced budget
         """
         if prioritize is None:
-            prioritize = [ContextType.KNOWLEDGE, ContextType.MEMORY, ContextType.TASK, ContextType.SYSTEM]
+            prioritize = [
+                ContextType.KNOWLEDGE,
+                ContextType.MEMORY,
+                ContextType.TASK,
+                ContextType.SYSTEM,
+            ]

         # Calculate unused tokens per type
         unused: dict[str, int] = {}


@@ -50,7 +50,9 @@ class CacheStats:
             "embedding_cache": self.embedding_cache,
             "retrieval_cache": self.retrieval_cache,
             "overall_hit_rate": self.overall_hit_rate,
-            "last_cleanup": self.last_cleanup.isoformat() if self.last_cleanup else None,
+            "last_cleanup": self.last_cleanup.isoformat()
+            if self.last_cleanup
+            else None,
             "cleanup_count": self.cleanup_count,
         }
@@ -104,7 +106,8 @@ class CacheManager:
         else:
             self._embedding_cache = create_embedding_cache(
                 max_size=self._settings.cache_max_items,
-                default_ttl_seconds=self._settings.cache_ttl_seconds * 12,  # 1hr for embeddings
+                default_ttl_seconds=self._settings.cache_ttl_seconds
+                * 12,  # 1hr for embeddings
                 redis=redis,
             )
@@ -271,7 +274,9 @@ class CacheManager:
         # Invalidate retrieval cache
         if self._retrieval_cache:
-            uuid_id = UUID(str(memory_id)) if not isinstance(memory_id, UUID) else memory_id
+            uuid_id = (
+                UUID(str(memory_id)) if not isinstance(memory_id, UUID) else memory_id
+            )
             count += self._retrieval_cache.invalidate_by_memory(uuid_id)
         logger.debug(f"Invalidated {count} cache entries for {memory_type}:{memory_id}")


@@ -405,9 +405,7 @@ class EmbeddingCache:
         count = 0
         with self._lock:
-            keys_to_remove = [
-                k for k, v in self._cache.items() if v.model == model
-            ]
+            keys_to_remove = [k for k, v in self._cache.items() if v.model == model]
             for key in keys_to_remove:
                 del self._cache[key]
                 count += 1
@@ -454,9 +452,7 @@ class EmbeddingCache:
             Number of entries removed
         """
         with self._lock:
-            keys_to_remove = [
-                k for k, v in self._cache.items() if v.is_expired()
-            ]
+            keys_to_remove = [k for k, v in self._cache.items() if v.is_expired()]
             for key in keys_to_remove:
                 del self._cache[key]
                 self._stats.expirations += 1


@@ -384,9 +384,7 @@ class HotMemoryCache[T]:
             Number of entries removed
         """
         with self._lock:
-            keys_to_remove = [
-                k for k, v in self._cache.items() if v.is_expired()
-            ]
+            keys_to_remove = [k for k, v in self._cache.items() if v.is_expired()]
             for key in keys_to_remove:
                 del self._cache[key]
                 self._stats.expirations += 1


@@ -321,10 +321,7 @@ class MemoryContextSource:
             min_confidence=min_relevance,
         )
-        return [
-            MemoryContext.from_semantic_memory(fact, query=query)
-            for fact in facts
-        ]
+        return [MemoryContext.from_semantic_memory(fact, query=query) for fact in facts]

     async def _fetch_procedural(
         self,


@@ -287,7 +287,9 @@ class AgentLifecycleManager:
         # Get all current state
         all_keys = await working.list_keys()

         # Filter out checkpoint keys
-        state_keys = [k for k in all_keys if not k.startswith(self.CHECKPOINT_PREFIX)]
+        state_keys = [
+            k for k in all_keys if not k.startswith(self.CHECKPOINT_PREFIX)
+        ]
         state: dict[str, Any] = {}
         for key in state_keys:
@@ -483,7 +485,9 @@ class AgentLifecycleManager:
         # Gather session state for consolidation
         all_keys = await working.list_keys()
-        state_keys = [k for k in all_keys if not k.startswith(self.CHECKPOINT_PREFIX)]
+        state_keys = [
+            k for k in all_keys if not k.startswith(self.CHECKPOINT_PREFIX)
+        ]

         session_state: dict[str, Any] = {}
         for key in state_keys:
@@ -597,14 +601,16 @@ class AgentLifecycleManager:
         for key in all_keys:
             if key.startswith(self.CHECKPOINT_PREFIX):
-                checkpoint_id = key[len(self.CHECKPOINT_PREFIX):]
+                checkpoint_id = key[len(self.CHECKPOINT_PREFIX) :]
                 checkpoint = await working.get(key)
                 if checkpoint:
-                    checkpoints.append({
-                        "checkpoint_id": checkpoint_id,
-                        "timestamp": checkpoint.get("timestamp"),
-                        "keys_count": checkpoint.get("keys_count", 0),
-                    })
+                    checkpoints.append(
+                        {
+                            "checkpoint_id": checkpoint_id,
+                            "timestamp": checkpoint.get("timestamp"),
+                            "keys_count": checkpoint.get("keys_count", 0),
+                        }
+                    )

         # Sort by timestamp (newest first)
         checkpoints.sort(


@@ -414,12 +414,14 @@ class MemoryToolService:
                 if args.query.lower() in key.lower():
                     value = await working.get(key)
                     if value is not None:
-                        results.append({
-                            "type": "working",
-                            "key": key,
-                            "content": str(value),
-                            "relevance": 1.0,
-                        })
+                        results.append(
+                            {
+                                "type": "working",
+                                "key": key,
+                                "content": str(value),
+                                "relevance": 1.0,
+                            }
+                        )

         elif memory_type == MemoryType.EPISODIC:
             episodic = await self._get_episodic()
@@ -430,14 +432,18 @@ class MemoryToolService:
                 agent_instance_id=context.agent_instance_id,
             )
             for episode in episodes:
-                results.append({
-                    "type": "episodic",
-                    "id": str(episode.id),
-                    "summary": episode.task_description,
-                    "outcome": episode.outcome.value if episode.outcome else None,
-                    "occurred_at": episode.occurred_at.isoformat(),
-                    "relevance": episode.importance_score,
-                })
+                results.append(
+                    {
+                        "type": "episodic",
+                        "id": str(episode.id),
+                        "summary": episode.task_description,
+                        "outcome": episode.outcome.value
+                        if episode.outcome
+                        else None,
+                        "occurred_at": episode.occurred_at.isoformat(),
+                        "relevance": episode.importance_score,
+                    }
+                )

         elif memory_type == MemoryType.SEMANTIC:
             semantic = await self._get_semantic()
@@ -448,15 +454,17 @@ class MemoryToolService:
                 min_confidence=args.min_relevance,
             )
             for fact in facts:
-                results.append({
-                    "type": "semantic",
-                    "id": str(fact.id),
-                    "subject": fact.subject,
-                    "predicate": fact.predicate,
-                    "object": fact.object,
-                    "confidence": fact.confidence,
-                    "relevance": fact.confidence,
-                })
+                results.append(
+                    {
+                        "type": "semantic",
+                        "id": str(fact.id),
+                        "subject": fact.subject,
+                        "predicate": fact.predicate,
+                        "object": fact.object,
+                        "confidence": fact.confidence,
+                        "relevance": fact.confidence,
+                    }
+                )

         elif memory_type == MemoryType.PROCEDURAL:
             procedural = await self._get_procedural()
@@ -467,15 +475,17 @@ class MemoryToolService:
                 limit=args.limit,
             )
             for proc in procedures:
-                results.append({
-                    "type": "procedural",
-                    "id": str(proc.id),
-                    "name": proc.name,
-                    "trigger": proc.trigger_pattern,
-                    "success_rate": proc.success_rate,
-                    "steps_count": len(proc.steps) if proc.steps else 0,
-                    "relevance": proc.success_rate,
-                })
+                results.append(
+                    {
+                        "type": "procedural",
+                        "id": str(proc.id),
+                        "name": proc.name,
+                        "trigger": proc.trigger_pattern,
+                        "success_rate": proc.success_rate,
+                        "steps_count": len(proc.steps) if proc.steps else 0,
+                        "relevance": proc.success_rate,
+                    }
+                )

         # Sort by relevance and limit
         results.sort(key=lambda x: x.get("relevance", 0), reverse=True)
@@ -601,7 +611,11 @@ class MemoryToolService:
             if ep.task_type:
                 task_types[ep.task_type] = task_types.get(ep.task_type, 0) + 1
             if ep.outcome:
-                outcome_val = ep.outcome.value if hasattr(ep.outcome, "value") else str(ep.outcome)
+                outcome_val = (
+                    ep.outcome.value
+                    if hasattr(ep.outcome, "value")
+                    else str(ep.outcome)
+                )
                 outcomes[outcome_val] = outcomes.get(outcome_val, 0) + 1

         # Sort by frequency
@@ -613,11 +627,13 @@ class MemoryToolService:
         examples = []
         if args.include_examples:
             for ep in episodes[: min(3, args.max_items)]:
-                examples.append({
-                    "summary": ep.task_description,
-                    "task_type": ep.task_type,
-                    "outcome": ep.outcome.value if ep.outcome else None,
-                })
+                examples.append(
+                    {
+                        "summary": ep.task_description,
+                        "task_type": ep.task_type,
+                        "outcome": ep.outcome.value if ep.outcome else None,
+                    }
+                )

         return {
             "analysis_type": "recent_patterns",
@@ -661,11 +677,13 @@ class MemoryToolService:
         examples = []
         if args.include_examples:
             for ep in successful[: min(3, args.max_items)]:
-                examples.append({
-                    "summary": ep.task_description,
-                    "task_type": ep.task_type,
-                    "lessons": ep.lessons_learned,
-                })
+                examples.append(
+                    {
+                        "summary": ep.task_description,
+                        "task_type": ep.task_type,
+                        "lessons": ep.lessons_learned,
+                    }
+                )

         return {
             "analysis_type": "success_factors",
@@ -694,9 +712,7 @@ class MemoryToolService:
                 failure_by_task[task].append(ep)

         # Most common failure types
-        failure_counts = {
-            task: len(eps) for task, eps in failure_by_task.items()
-        }
+        failure_counts = {task: len(eps) for task, eps in failure_by_task.items()}
         top_failures = sorted(failure_counts.items(), key=lambda x: x[1], reverse=True)[
             : args.max_items
         ]
@@ -704,12 +720,14 @@ class MemoryToolService:
         examples = []
         if args.include_examples:
             for ep in failed[: min(3, args.max_items)]:
-                examples.append({
-                    "summary": ep.task_description,
-                    "task_type": ep.task_type,
-                    "lessons": ep.lessons_learned,
-                    "error": ep.outcome_details,
-                })
+                examples.append(
+                    {
+                        "summary": ep.task_description,
+                        "task_type": ep.task_type,
+                        "lessons": ep.lessons_learned,
+                        "error": ep.outcome_details,
+                    }
+                )

         return {
             "analysis_type": "failure_patterns",
@@ -794,15 +812,21 @@ class MemoryToolService:
         insights = []

         if top_tasks:
-            insights.append(f"Most common task type: {top_tasks[0][0]} ({top_tasks[0][1]} occurrences)")
+            insights.append(
+                f"Most common task type: {top_tasks[0][0]} ({top_tasks[0][1]} occurrences)"
+            )

         total = sum(outcome_dist.values())
         if total > 0:
             success_rate = outcome_dist.get("success", 0) / total
             if success_rate > 0.8:
-                insights.append("High success rate observed - current approach is working well")
+                insights.append(
+                    "High success rate observed - current approach is working well"
+                )
             elif success_rate < 0.5:
-                insights.append("Success rate below 50% - consider reviewing procedures")
+                insights.append(
+                    "Success rate below 50% - consider reviewing procedures"
+                )

         return insights
@@ -839,9 +863,13 @@ class MemoryToolService:
         if top_failures:
             worst_task, count = top_failures[0]
-            tips.append(f"'{worst_task}' has most failures ({count}) - needs procedure review")
+            tips.append(
+                f"'{worst_task}' has most failures ({count}) - needs procedure review"
+            )

-        tips.append("Review lessons_learned from past failures before attempting similar tasks")
+        tips.append(
+            "Review lessons_learned from past failures before attempting similar tasks"
+        )

         return tips
@@ -912,7 +940,11 @@ class MemoryToolService:
         outcomes = {"success": 0, "failure": 0, "partial": 0, "abandoned": 0}
         for ep in recent_episodes:
             if ep.outcome:
-                key = ep.outcome.value if hasattr(ep.outcome, "value") else str(ep.outcome)
+                key = (
+                    ep.outcome.value
+                    if hasattr(ep.outcome, "value")
+                    else str(ep.outcome)
+                )
                 if key in outcomes:
                     outcomes[key] += 1
@@ -942,7 +974,8 @@ class MemoryToolService:
         # Filter by minimum success rate if specified
         procedures = [
-            p for p in all_procedures
+            p
+            for p in all_procedures
             if args.min_success_rate is None or p.success_rate >= args.min_success_rate
         ][: args.limit]


@@ -441,9 +441,7 @@ class MemoryMetrics:
         # Get hits/misses by cache type
         for labels_str, hits in self._counters["memory_cache_hits_total"].items():
-            cache_type = self._parse_labels(labels_str).get(
-                "cache_type", "unknown"
-            )
+            cache_type = self._parse_labels(labels_str).get("cache_type", "unknown")
             if cache_type not in stats:
                 stats[cache_type] = {"hits": 0, "misses": 0}
             stats[cache_type]["hits"] = hits
@@ -451,9 +449,7 @@ class MemoryMetrics:
         for labels_str, misses in self._counters[
             "memory_cache_misses_total"
         ].items():
-            cache_type = self._parse_labels(labels_str).get(
-                "cache_type", "unknown"
-            )
+            cache_type = self._parse_labels(labels_str).get("cache_type", "unknown")
             if cache_type not in stats:
                 stats[cache_type] = {"hits": 0, "misses": 0}
             stats[cache_type]["misses"] = misses


@@ -149,8 +149,7 @@ class MemoryReflection:
         # Filter to time range
         episodes = [
-            e for e in episodes
-            if time_range.start <= e.occurred_at <= time_range.end
+            e for e in episodes if time_range.start <= e.occurred_at <= time_range.end
         ]

         if not episodes:
@@ -313,7 +312,9 @@ class MemoryReflection:
                         f"Task type '{task_type}': {success_rate:.0%} success rate, "
                         f"avg {avg_duration:.1f}s duration, {avg_tokens:.0f} tokens"
                     ),
-                    confidence=min(1.0, stats["total"] / 10),  # Higher sample = higher confidence
+                    confidence=min(
+                        1.0, stats["total"] / 10
+                    ),  # Higher sample = higher confidence
                    occurrence_count=stats["total"],
                     episode_ids=[e.id for e in stats["episodes"]],
                     first_seen=min(e.occurred_at for e in stats["episodes"]),
@@ -397,7 +398,9 @@ class MemoryReflection:
         failed = [e for e in episodes if e.outcome == Outcome.FAILURE]

         if len(successful) >= 3 and len(failed) >= 3:
-            avg_success_duration = statistics.mean(e.duration_seconds for e in successful)
+            avg_success_duration = statistics.mean(
+                e.duration_seconds for e in successful
+            )
             avg_failure_duration = statistics.mean(e.duration_seconds for e in failed)

             if avg_failure_duration > avg_success_duration * 1.5:
@@ -409,7 +412,7 @@ class MemoryReflection:
                         description=(
                             f"Failed tasks average {avg_failure_duration:.1f}s vs "
                             f"{avg_success_duration:.1f}s for successful tasks "
-                            f"({avg_failure_duration/avg_success_duration:.1f}x longer)"
+                            f"({avg_failure_duration / avg_success_duration:.1f}x longer)"
                         ),
                         confidence=0.8,
                         occurrence_count=len(successful) + len(failed),
@@ -427,9 +430,15 @@ class MemoryReflection:
         # Analyze token efficiency
         if len(successful) >= 3:
             avg_tokens = statistics.mean(e.tokens_used for e in successful)
-            std_tokens = statistics.stdev(e.tokens_used for e in successful) if len(successful) > 1 else 0
+            std_tokens = (
+                statistics.stdev(e.tokens_used for e in successful)
+                if len(successful) > 1
+                else 0
+            )
-            efficient = [e for e in successful if e.tokens_used < avg_tokens - std_tokens]
+            efficient = [
+                e for e in successful if e.tokens_used < avg_tokens - std_tokens
+            ]

             if len(efficient) >= self._config.min_pattern_occurrences:
                 patterns.append(
                     Pattern(
@@ -508,8 +517,7 @@ class MemoryReflection:
         # Filter to time range
         episodes = [
-            e for e in episodes
-            if time_range.start <= e.occurred_at <= time_range.end
+            e for e in episodes if time_range.start <= e.occurred_at <= time_range.end
         ]

         if len(episodes) < self._config.min_sample_size_for_factor:
@@ -652,9 +660,7 @@ class MemoryReflection:
             avg_success_duration = statistics.mean(
                 e.duration_seconds for e in successful
             )
-            avg_failure_duration = statistics.mean(
-                e.duration_seconds for e in failed
-            )
+            avg_failure_duration = statistics.mean(e.duration_seconds for e in failed)

             if avg_success_duration > 0:
                 duration_ratio = avg_failure_duration / avg_success_duration
@@ -837,7 +843,9 @@ class MemoryReflection:
         baseline_durations = [e.duration_seconds for e in baseline]
         baseline_mean = statistics.mean(baseline_durations)
-        baseline_std = statistics.stdev(baseline_durations) if len(baseline_durations) > 1 else 0
+        baseline_std = (
+            statistics.stdev(baseline_durations) if len(baseline_durations) > 1 else 0
+        )

         if baseline_std == 0:
             return anomalies
@@ -997,7 +1005,10 @@ class MemoryReflection:
         ) / len(recent)

         # Detect significant failure rate increase
-        if recent_failure_rate > baseline_failure_rate * 1.5 and recent_failure_rate > 0.3:
+        if (
+            recent_failure_rate > baseline_failure_rate * 1.5
+            and recent_failure_rate > 0.3
+        ):
             rate_increase = recent_failure_rate / max(baseline_failure_rate, 0.01)

             anomalies.append(
@@ -1074,14 +1085,11 @@ class MemoryReflection:
         insights.extend(self._insights_from_anomalies(anomalies))

         # Generate cross-cutting insights
-        insights.extend(
-            self._generate_cross_insights(patterns, factors, anomalies)
-        )
+        insights.extend(self._generate_cross_insights(patterns, factors, anomalies))

         # Filter by confidence and sort by priority
         insights = [
-            i for i in insights
-            if i.confidence >= self._config.min_insight_confidence
+            i for i in insights if i.confidence >= self._config.min_insight_confidence
         ]
         insights.sort(key=lambda i: -i.priority)
@@ -1182,9 +1190,7 @@ class MemoryReflection:
                     source_patterns=[],
                     source_factors=[f.id for f in top_positive],
                     source_anomalies=[],
-                    recommended_actions=[
-                        f"Reinforce: {f.name}" for f in top_positive
-                    ],
+                    recommended_actions=[f"Reinforce: {f.name}" for f in top_positive],
                     generated_at=_utcnow(),
                     metadata={
                         "factors": [f.to_dict() for f in top_positive],
@@ -1200,17 +1206,16 @@ class MemoryReflection:
                    insight_type=InsightType.WARNING,
                     title="Factors correlating with failure",
                     description=(
-                        "Risky factors: "
-                        + ", ".join(f.name for f in top_negative)
+                        "Risky factors: " + ", ".join(f.name for f in top_negative)
                     ),
                     priority=0.75,
-                    confidence=statistics.mean(abs(f.correlation) for f in top_negative),
+                    confidence=statistics.mean(
+                        abs(f.correlation) for f in top_negative
+                    ),
                     source_patterns=[],
                     source_factors=[f.id for f in top_negative],
                     source_anomalies=[],
-                    recommended_actions=[
-                        f"Mitigate: {f.name}" for f in top_negative
-                    ],
+                    recommended_actions=[f"Mitigate: {f.name}" for f in top_negative],
                     generated_at=_utcnow(),
                     metadata={
                         "factors": [f.to_dict() for f in top_negative],
@@ -1254,8 +1259,7 @@ class MemoryReflection:
         )

         failure_rate_anomalies = [
-            a for a in anomalies
-            if a.anomaly_type == AnomalyType.UNUSUAL_FAILURE_RATE
+            a for a in anomalies if a.anomaly_type == AnomalyType.UNUSUAL_FAILURE_RATE
         ]
         if failure_rate_anomalies:
             for anomaly in failure_rate_anomalies:
@@ -1295,7 +1299,13 @@ class MemoryReflection:
         total_items = len(patterns) + len(factors) + len(anomalies)
         if total_items > 0:
             warning_count = (
-                len([p for p in patterns if p.pattern_type == PatternType.RECURRING_FAILURE])
+                len(
+                    [
+                        p
+                        for p in patterns
+                        if p.pattern_type == PatternType.RECURRING_FAILURE
+                    ]
+                )
                 + len([a for a in anomalies if a.is_critical])
                 + len([f for f in factors if f.correlation < -0.3])
             )
@@ -1312,13 +1322,19 @@ class MemoryReflection:
                         f"Found {warning_count} warning indicators."
                     ),
                     priority=0.6,
-                    confidence=min(1.0, total_items / 20),  # Higher sample = higher confidence
+                    confidence=min(
+                        1.0, total_items / 20
+                    ),  # Higher sample = higher confidence
                     source_patterns=[p.id for p in patterns[:5]],
                     source_factors=[f.id for f in factors[:5]],
                     source_anomalies=[a.id for a in anomalies[:5]],
                     recommended_actions=(
-                        ["Continue current practices"] if health_score > 0.7
-                        else ["Review warnings and address issues", "Focus on improvement areas"]
+                        ["Continue current practices"]
+                        if health_score > 0.7
+                        else [
+                            "Review warnings and address issues",
+                            "Focus on improvement areas",
+                        ]
                     ),
                     generated_at=_utcnow(),
                     metadata={
@@ -1374,8 +1390,7 @@ class MemoryReflection:
             agent_instance_id=agent_instance_id,
         )
         episodes_in_range = [
-            e for e in episodes
-            if time_range.start <= e.occurred_at <= time_range.end
+            e for e in episodes if time_range.start <= e.occurred_at <= time_range.end
         ]

         # Run all analyses
# Run all analyses


@@ -70,8 +70,7 @@ class TimeRange:
         """Create time range for last N hours."""
         end = _utcnow()
         start = datetime(
-            end.year, end.month, end.day, end.hour, end.minute, end.second,
-            tzinfo=UTC
+            end.year, end.month, end.day, end.hour, end.minute, end.second, tzinfo=UTC
         ) - __import__("datetime").timedelta(hours=hours)
         return cls(start=start, end=end)