Spaces:
Runtime error
Runtime error
Cascade Bot
commited on
Commit
·
d16c12e
1
Parent(s):
a084fbc
feat(quantum): update QuantumStrategy with improved implementation
Browse files- Add proper StrategyResult usage
- Create QuantumOperationType enum
- Add data classes for operations and measurements
- Add timestamps and performance metrics
- Improve error handling and logging
- Enhance quantum operations with proper gates
- reasoning/analogical.py +320 -469
- reasoning/multimodal.py +212 -74
reasoning/analogical.py
CHANGED
|
@@ -9,7 +9,7 @@ from datetime import datetime
|
|
| 9 |
import numpy as np
|
| 10 |
from collections import defaultdict
|
| 11 |
|
| 12 |
-
from .base import ReasoningStrategy
|
| 13 |
|
| 14 |
class AnalogicalLevel(Enum):
|
| 15 |
"""Levels of analogical similarity."""
|
|
@@ -38,6 +38,7 @@ class AnalogicalPattern:
|
|
| 38 |
relations: List[Tuple[str, str, str]] # (entity1, relation, entity2)
|
| 39 |
constraints: List[str]
|
| 40 |
metadata: Dict[str, Any] = field(default_factory=dict)
|
|
|
|
| 41 |
|
| 42 |
@dataclass
|
| 43 |
class AnalogicalMapping:
|
|
@@ -49,7 +50,7 @@ class AnalogicalMapping:
|
|
| 49 |
correspondences: List[Tuple[str, str, float]] # (source, target, strength)
|
| 50 |
transformations: List[Dict[str, Any]]
|
| 51 |
confidence: float
|
| 52 |
-
|
| 53 |
|
| 54 |
@dataclass
|
| 55 |
class AnalogicalSolution:
|
|
@@ -62,6 +63,7 @@ class AnalogicalSolution:
|
|
| 62 |
confidence: float
|
| 63 |
validation: Dict[str, Any]
|
| 64 |
metadata: Dict[str, Any] = field(default_factory=dict)
|
|
|
|
| 65 |
|
| 66 |
class AnalogicalReasoning(ReasoningStrategy):
|
| 67 |
"""
|
|
@@ -81,16 +83,6 @@ class AnalogicalReasoning(ReasoningStrategy):
|
|
| 81 |
|
| 82 |
# Standard reasoning parameters
|
| 83 |
self.min_confidence = self.config.get('min_confidence', 0.7)
|
| 84 |
-
self.parallel_threshold = self.config.get('parallel_threshold', 3)
|
| 85 |
-
self.learning_rate = self.config.get('learning_rate', 0.1)
|
| 86 |
-
self.strategy_weights = self.config.get('strategy_weights', {
|
| 87 |
-
"LOCAL_LLM": 0.8,
|
| 88 |
-
"CHAIN_OF_THOUGHT": 0.6,
|
| 89 |
-
"TREE_OF_THOUGHTS": 0.5,
|
| 90 |
-
"META_LEARNING": 0.4
|
| 91 |
-
})
|
| 92 |
-
|
| 93 |
-
# Analogical reasoning specific parameters
|
| 94 |
self.min_similarity = self.config.get('min_similarity', 0.6)
|
| 95 |
self.max_candidates = self.config.get('max_candidates', 5)
|
| 96 |
self.adaptation_threshold = self.config.get('adaptation_threshold', 0.7)
|
|
@@ -100,512 +92,371 @@ class AnalogicalReasoning(ReasoningStrategy):
|
|
| 100 |
self.mappings: Dict[str, AnalogicalMapping] = {}
|
| 101 |
self.solutions: Dict[str, AnalogicalSolution] = {}
|
| 102 |
|
| 103 |
-
#
|
| 104 |
-
self.
|
| 105 |
-
|
| 106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
|
| 108 |
-
|
| 109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
try:
|
| 111 |
-
# Extract patterns
|
| 112 |
patterns = await self._extract_patterns(query, context)
|
|
|
|
| 113 |
|
| 114 |
-
# Find
|
| 115 |
matches = await self._find_matches(patterns, context)
|
|
|
|
| 116 |
|
| 117 |
-
# Create
|
| 118 |
mappings = await self._create_mappings(matches, context)
|
|
|
|
| 119 |
|
| 120 |
-
# Generate
|
| 121 |
solutions = await self._generate_solutions(mappings, context)
|
|
|
|
| 122 |
|
| 123 |
# Select best solution
|
| 124 |
best_solution = await self._select_best_solution(solutions, context)
|
| 125 |
|
| 126 |
-
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
-
return {
|
| 130 |
-
"success": True,
|
| 131 |
-
"answer": best_solution.inference["conclusion"],
|
| 132 |
-
"confidence": best_solution.confidence,
|
| 133 |
-
"analogy": {
|
| 134 |
-
"source": best_solution.source_analogy,
|
| 135 |
-
"mapping": self._mapping_to_dict(best_solution.mapping),
|
| 136 |
-
"adaptation": best_solution.adaptation
|
| 137 |
-
},
|
| 138 |
-
"reasoning_trace": best_solution.metadata.get("reasoning_trace", []),
|
| 139 |
-
"meta_insights": best_solution.metadata.get("meta_insights", [])
|
| 140 |
-
}
|
| 141 |
except Exception as e:
|
| 142 |
-
logging.error(f"
|
| 143 |
-
return
|
| 144 |
-
|
| 145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
"""Extract patterns from query for analogical matching."""
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
[
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
Relations: ...
|
| 165 |
-
Constraints: ...
|
| 166 |
-
|
| 167 |
-
[P2]
|
| 168 |
-
...
|
| 169 |
-
"""
|
| 170 |
-
|
| 171 |
-
response = await context["groq_api"].predict(prompt)
|
| 172 |
-
return self._parse_patterns(response["answer"])
|
| 173 |
-
|
| 174 |
-
async def _find_matches(self, patterns: List[AnalogicalPattern], context: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 175 |
"""Find matching patterns in knowledge base."""
|
| 176 |
-
|
| 177 |
-
Find analogical matches:
|
| 178 |
-
Patterns: {json.dumps([self._pattern_to_dict(p) for p in patterns])}
|
| 179 |
-
Context: {json.dumps(context)}
|
| 180 |
-
|
| 181 |
-
For each match provide:
|
| 182 |
-
1. Source domain
|
| 183 |
-
2. Similarity assessment
|
| 184 |
-
3. Key correspondences
|
| 185 |
-
4. Transfer potential
|
| 186 |
-
|
| 187 |
-
Format as:
|
| 188 |
-
[M1]
|
| 189 |
-
Source: ...
|
| 190 |
-
Similarity: ...
|
| 191 |
-
Correspondences: ...
|
| 192 |
-
Transfer: ...
|
| 193 |
-
|
| 194 |
-
[M2]
|
| 195 |
-
...
|
| 196 |
-
"""
|
| 197 |
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
"""Create mappings between source and target domains."""
|
| 203 |
-
|
| 204 |
-
Create analogical mappings:
|
| 205 |
-
Matches: {json.dumps(matches)}
|
| 206 |
-
Context: {json.dumps(context)}
|
| 207 |
-
|
| 208 |
-
For each mapping specify:
|
| 209 |
-
1. [Type]: {" | ".join([t.value for t in MappingType])}
|
| 210 |
-
2. [Elements]: Source and target elements
|
| 211 |
-
3. [Correspondences]: Element mappings
|
| 212 |
-
4. [Transformations]: Required adaptations
|
| 213 |
-
5. [Confidence]: Mapping strength
|
| 214 |
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
|
|
|
|
|
|
|
|
|
| 228 |
"""Generate solutions through analogical transfer."""
|
| 229 |
-
|
| 230 |
-
Generate analogical solutions:
|
| 231 |
-
Mappings: {json.dumps([self._mapping_to_dict(m) for m in mappings])}
|
| 232 |
-
Context: {json.dumps(context)}
|
| 233 |
-
|
| 234 |
-
For each solution provide:
|
| 235 |
-
1. Analogical inference
|
| 236 |
-
2. Required adaptations
|
| 237 |
-
3. Validation criteria
|
| 238 |
-
4. Confidence assessment
|
| 239 |
-
5. Reasoning trace
|
| 240 |
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 249 |
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
|
|
|
|
|
|
|
|
|
| 254 |
"""Select the best solution based on multiple criteria."""
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
[Evaluation]
|
| 268 |
-
Rankings: ...
|
| 269 |
-
Rationale: ...
|
| 270 |
-
Selection: ...
|
| 271 |
-
Confidence: ...
|
| 272 |
-
"""
|
| 273 |
-
|
| 274 |
-
response = await context["groq_api"].predict(prompt)
|
| 275 |
-
selection = self._parse_selection(response["answer"])
|
| 276 |
-
|
| 277 |
-
# Find selected solution
|
| 278 |
-
selected = max(solutions, key=lambda s: s.confidence)
|
| 279 |
-
for solution in solutions:
|
| 280 |
-
if solution.id == selection.get("selected_id"):
|
| 281 |
-
selected = solution
|
| 282 |
-
break
|
| 283 |
-
|
| 284 |
-
return selected
|
| 285 |
-
|
| 286 |
-
def _update_knowledge(self, patterns: List[AnalogicalPattern], mappings: List[AnalogicalMapping], solution: AnalogicalSolution):
|
| 287 |
"""Update knowledge base with new patterns and successful mappings."""
|
| 288 |
-
#
|
| 289 |
for pattern in patterns:
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
self.mappings[solution.mapping.id] = solution.mapping
|
| 297 |
|
| 298 |
-
#
|
| 299 |
self.solutions[solution.id] = solution
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
self
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 308 |
})
|
| 309 |
|
| 310 |
-
#
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
"success": solution.confidence >= self.adaptation_threshold
|
| 316 |
})
|
| 317 |
-
|
| 318 |
-
def _parse_patterns(self, response: str) -> List[AnalogicalPattern]:
|
| 319 |
-
"""Parse patterns from response."""
|
| 320 |
-
patterns = []
|
| 321 |
-
current = None
|
| 322 |
-
|
| 323 |
-
for line in response.split('\n'):
|
| 324 |
-
line = line.strip()
|
| 325 |
-
if not line:
|
| 326 |
-
continue
|
| 327 |
-
|
| 328 |
-
if line.startswith('[P'):
|
| 329 |
-
if current:
|
| 330 |
-
patterns.append(current)
|
| 331 |
-
current = None
|
| 332 |
-
elif line.startswith('Level:'):
|
| 333 |
-
level_str = line[6:].strip().lower()
|
| 334 |
-
try:
|
| 335 |
-
level = AnalogicalLevel(level_str)
|
| 336 |
-
current = AnalogicalPattern(
|
| 337 |
-
id=f"pattern_{len(patterns)}",
|
| 338 |
-
level=level,
|
| 339 |
-
features={},
|
| 340 |
-
relations=[],
|
| 341 |
-
constraints=[],
|
| 342 |
-
metadata={}
|
| 343 |
-
)
|
| 344 |
-
except ValueError:
|
| 345 |
-
logging.warning(f"Invalid analogical level: {level_str}")
|
| 346 |
-
elif current:
|
| 347 |
-
if line.startswith('Features:'):
|
| 348 |
-
try:
|
| 349 |
-
current.features = json.loads(line[9:].strip())
|
| 350 |
-
except:
|
| 351 |
-
current.features = {"raw": line[9:].strip()}
|
| 352 |
-
elif line.startswith('Relations:'):
|
| 353 |
-
relations = [r.strip() for r in line[10:].split(',')]
|
| 354 |
-
current.relations = [(r.split()[0], r.split()[1], r.split()[2])
|
| 355 |
-
for r in relations if len(r.split()) >= 3]
|
| 356 |
-
elif line.startswith('Constraints:'):
|
| 357 |
-
current.constraints = [c.strip() for c in line[12:].split(',')]
|
| 358 |
-
|
| 359 |
-
if current:
|
| 360 |
-
patterns.append(current)
|
| 361 |
-
|
| 362 |
-
return patterns
|
| 363 |
-
|
| 364 |
-
def _parse_matches(self, response: str) -> List[Dict[str, Any]]:
|
| 365 |
-
"""Parse matches from response."""
|
| 366 |
-
matches = []
|
| 367 |
-
current = None
|
| 368 |
-
|
| 369 |
-
for line in response.split('\n'):
|
| 370 |
-
line = line.strip()
|
| 371 |
-
if not line:
|
| 372 |
-
continue
|
| 373 |
-
|
| 374 |
-
if line.startswith('[M'):
|
| 375 |
-
if current:
|
| 376 |
-
matches.append(current)
|
| 377 |
-
current = {
|
| 378 |
-
"source": "",
|
| 379 |
-
"similarity": 0.0,
|
| 380 |
-
"correspondences": [],
|
| 381 |
-
"transfer": []
|
| 382 |
-
}
|
| 383 |
-
elif current:
|
| 384 |
-
if line.startswith('Source:'):
|
| 385 |
-
current["source"] = line[7:].strip()
|
| 386 |
-
elif line.startswith('Similarity:'):
|
| 387 |
-
try:
|
| 388 |
-
current["similarity"] = float(line[11:].strip())
|
| 389 |
-
except:
|
| 390 |
-
pass
|
| 391 |
-
elif line.startswith('Correspondences:'):
|
| 392 |
-
current["correspondences"] = [c.strip() for c in line[16:].split(',')]
|
| 393 |
-
elif line.startswith('Transfer:'):
|
| 394 |
-
current["transfer"] = [t.strip() for t in line[9:].split(',')]
|
| 395 |
-
|
| 396 |
-
if current:
|
| 397 |
-
matches.append(current)
|
| 398 |
-
|
| 399 |
-
return matches
|
| 400 |
-
|
| 401 |
-
def _parse_mappings(self, response: str) -> List[AnalogicalMapping]:
|
| 402 |
-
"""Parse mappings from response."""
|
| 403 |
-
mappings = []
|
| 404 |
-
current = None
|
| 405 |
-
|
| 406 |
-
for line in response.split('\n'):
|
| 407 |
-
line = line.strip()
|
| 408 |
-
if not line:
|
| 409 |
-
continue
|
| 410 |
-
|
| 411 |
-
if line.startswith('[Map'):
|
| 412 |
-
if current:
|
| 413 |
-
mappings.append(current)
|
| 414 |
-
current = None
|
| 415 |
-
elif line.startswith('Type:'):
|
| 416 |
-
type_str = line[5:].strip().lower()
|
| 417 |
-
try:
|
| 418 |
-
mapping_type = MappingType(type_str)
|
| 419 |
-
current = AnalogicalMapping(
|
| 420 |
-
id=f"mapping_{len(mappings)}",
|
| 421 |
-
type=mapping_type,
|
| 422 |
-
source_elements={},
|
| 423 |
-
target_elements={},
|
| 424 |
-
correspondences=[],
|
| 425 |
-
transformations=[],
|
| 426 |
-
confidence=0.0,
|
| 427 |
-
metadata={}
|
| 428 |
-
)
|
| 429 |
-
except ValueError:
|
| 430 |
-
logging.warning(f"Invalid mapping type: {type_str}")
|
| 431 |
-
elif current:
|
| 432 |
-
if line.startswith('Elements:'):
|
| 433 |
-
try:
|
| 434 |
-
elements = json.loads(line[9:].strip())
|
| 435 |
-
current.source_elements = elements.get("source", {})
|
| 436 |
-
current.target_elements = elements.get("target", {})
|
| 437 |
-
except:
|
| 438 |
-
pass
|
| 439 |
-
elif line.startswith('Correspondences:'):
|
| 440 |
-
pairs = [c.strip() for c in line[16:].split(',')]
|
| 441 |
-
for pair in pairs:
|
| 442 |
-
parts = pair.split(':')
|
| 443 |
-
if len(parts) >= 2:
|
| 444 |
-
source = parts[0].strip()
|
| 445 |
-
target = parts[1].strip()
|
| 446 |
-
strength = float(parts[2]) if len(parts) > 2 else 1.0
|
| 447 |
-
current.correspondences.append((source, target, strength))
|
| 448 |
-
elif line.startswith('Transformations:'):
|
| 449 |
-
try:
|
| 450 |
-
current.transformations = json.loads(line[16:].strip())
|
| 451 |
-
except:
|
| 452 |
-
current.transformations = [{"raw": line[16:].strip()}]
|
| 453 |
-
elif line.startswith('Confidence:'):
|
| 454 |
-
try:
|
| 455 |
-
current.confidence = float(line[11:].strip())
|
| 456 |
-
except:
|
| 457 |
-
pass
|
| 458 |
-
|
| 459 |
-
if current:
|
| 460 |
-
mappings.append(current)
|
| 461 |
-
|
| 462 |
-
return mappings
|
| 463 |
-
|
| 464 |
-
def _parse_solutions(self, response: str, mappings: List[AnalogicalMapping]) -> List[AnalogicalSolution]:
|
| 465 |
-
"""Parse solutions from response."""
|
| 466 |
-
solutions = []
|
| 467 |
-
current = None
|
| 468 |
-
|
| 469 |
-
for line in response.split('\n'):
|
| 470 |
-
line = line.strip()
|
| 471 |
-
if not line:
|
| 472 |
-
continue
|
| 473 |
-
|
| 474 |
-
if line.startswith('[S'):
|
| 475 |
-
if current:
|
| 476 |
-
solutions.append(current)
|
| 477 |
-
current = None
|
| 478 |
-
mapping_idx = len(solutions)
|
| 479 |
-
if mapping_idx < len(mappings):
|
| 480 |
-
current = AnalogicalSolution(
|
| 481 |
-
id=f"solution_{len(solutions)}",
|
| 482 |
-
source_analogy="",
|
| 483 |
-
mapping=mappings[mapping_idx],
|
| 484 |
-
adaptation={},
|
| 485 |
-
inference={},
|
| 486 |
-
confidence=0.0,
|
| 487 |
-
validation={},
|
| 488 |
-
metadata={}
|
| 489 |
-
)
|
| 490 |
-
elif current:
|
| 491 |
-
if line.startswith('Inference:'):
|
| 492 |
-
try:
|
| 493 |
-
current.inference = json.loads(line[10:].strip())
|
| 494 |
-
except:
|
| 495 |
-
current.inference = {"conclusion": line[10:].strip()}
|
| 496 |
-
elif line.startswith('Adaptation:'):
|
| 497 |
-
try:
|
| 498 |
-
current.adaptation = json.loads(line[11:].strip())
|
| 499 |
-
except:
|
| 500 |
-
current.adaptation = {"steps": [line[11:].strip()]}
|
| 501 |
-
elif line.startswith('Validation:'):
|
| 502 |
-
try:
|
| 503 |
-
current.validation = json.loads(line[11:].strip())
|
| 504 |
-
except:
|
| 505 |
-
current.validation = {"criteria": [line[11:].strip()]}
|
| 506 |
-
elif line.startswith('Confidence:'):
|
| 507 |
-
try:
|
| 508 |
-
current.confidence = float(line[11:].strip())
|
| 509 |
-
except:
|
| 510 |
-
pass
|
| 511 |
-
elif line.startswith('Trace:'):
|
| 512 |
-
current.metadata["reasoning_trace"] = [t.strip() for t in line[6:].split(',')]
|
| 513 |
|
| 514 |
-
|
| 515 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 516 |
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
"confidence": 0.0,
|
| 524 |
-
"rationale": []
|
| 525 |
-
}
|
| 526 |
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
selection["confidence"] = float(line[11:].strip())
|
| 534 |
-
except:
|
| 535 |
-
pass
|
| 536 |
-
elif line.startswith('Rationale:'):
|
| 537 |
-
selection["rationale"] = [r.strip() for r in line[10:].split(',')]
|
| 538 |
|
| 539 |
-
return
|
| 540 |
-
|
| 541 |
def _pattern_to_dict(self, pattern: AnalogicalPattern) -> Dict[str, Any]:
|
| 542 |
"""Convert pattern to dictionary for serialization."""
|
| 543 |
return {
|
| 544 |
-
|
| 545 |
-
|
| 546 |
-
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
|
|
|
|
| 550 |
}
|
| 551 |
-
|
| 552 |
def _mapping_to_dict(self, mapping: AnalogicalMapping) -> Dict[str, Any]:
|
| 553 |
"""Convert mapping to dictionary for serialization."""
|
| 554 |
return {
|
| 555 |
-
|
| 556 |
-
|
| 557 |
-
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
}
|
| 564 |
-
|
| 565 |
def _solution_to_dict(self, solution: AnalogicalSolution) -> Dict[str, Any]:
|
| 566 |
"""Convert solution to dictionary for serialization."""
|
| 567 |
return {
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
|
|
|
| 576 |
}
|
| 577 |
-
|
| 578 |
-
def
|
| 579 |
-
"""Get statistics about pattern usage and effectiveness."""
|
| 580 |
-
return {
|
| 581 |
-
"total_patterns": len(self.patterns),
|
| 582 |
-
"level_distribution": defaultdict(int, {p.level.value: 1 for p in self.patterns.values()}),
|
| 583 |
-
"average_constraints": sum(len(p.constraints) for p in self.patterns.values()) / len(self.patterns) if self.patterns else 0,
|
| 584 |
-
"pattern_weights": dict(self.pattern_weights)
|
| 585 |
-
}
|
| 586 |
-
|
| 587 |
-
def get_mapping_statistics(self) -> Dict[str, Any]:
|
| 588 |
-
"""Get statistics about mapping effectiveness."""
|
| 589 |
-
return {
|
| 590 |
-
"total_mappings": len(self.mappings),
|
| 591 |
-
"type_distribution": defaultdict(int, {m.type.value: 1 for m in self.mappings.values()}),
|
| 592 |
-
"average_confidence": sum(m.confidence for m in self.mappings.values()) / len(self.mappings) if self.mappings else 0,
|
| 593 |
-
"transformation_counts": defaultdict(int, {m.id: len(m.transformations) for m in self.mappings.values()})
|
| 594 |
-
}
|
| 595 |
-
|
| 596 |
-
def get_solution_statistics(self) -> Dict[str, Any]:
|
| 597 |
-
"""Get statistics about solution quality."""
|
| 598 |
-
return {
|
| 599 |
-
"total_solutions": len(self.solutions),
|
| 600 |
-
"average_confidence": sum(s.confidence for s in self.solutions.values()) / len(self.solutions) if self.solutions else 0,
|
| 601 |
-
"adaptation_success_rate": sum(1 for h in self.adaptation_history if h["success"]) / len(self.adaptation_history) if self.adaptation_history else 0
|
| 602 |
-
}
|
| 603 |
-
|
| 604 |
-
def clear_knowledge_base(self):
|
| 605 |
"""Clear the knowledge base."""
|
| 606 |
self.patterns.clear()
|
| 607 |
self.mappings.clear()
|
| 608 |
self.solutions.clear()
|
| 609 |
-
|
| 610 |
-
|
| 611 |
-
self.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
import numpy as np
|
| 10 |
from collections import defaultdict
|
| 11 |
|
| 12 |
+
from .base import ReasoningStrategy, StrategyResult
|
| 13 |
|
| 14 |
class AnalogicalLevel(Enum):
|
| 15 |
"""Levels of analogical similarity."""
|
|
|
|
| 38 |
relations: List[Tuple[str, str, str]] # (entity1, relation, entity2)
|
| 39 |
constraints: List[str]
|
| 40 |
metadata: Dict[str, Any] = field(default_factory=dict)
|
| 41 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 42 |
|
| 43 |
@dataclass
|
| 44 |
class AnalogicalMapping:
|
|
|
|
| 50 |
correspondences: List[Tuple[str, str, float]] # (source, target, strength)
|
| 51 |
transformations: List[Dict[str, Any]]
|
| 52 |
confidence: float
|
| 53 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 54 |
|
| 55 |
@dataclass
|
| 56 |
class AnalogicalSolution:
|
|
|
|
| 63 |
confidence: float
|
| 64 |
validation: Dict[str, Any]
|
| 65 |
metadata: Dict[str, Any] = field(default_factory=dict)
|
| 66 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 67 |
|
| 68 |
class AnalogicalReasoning(ReasoningStrategy):
|
| 69 |
"""
|
|
|
|
| 83 |
|
| 84 |
# Standard reasoning parameters
|
| 85 |
self.min_confidence = self.config.get('min_confidence', 0.7)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
self.min_similarity = self.config.get('min_similarity', 0.6)
|
| 87 |
self.max_candidates = self.config.get('max_candidates', 5)
|
| 88 |
self.adaptation_threshold = self.config.get('adaptation_threshold', 0.7)
|
|
|
|
| 92 |
self.mappings: Dict[str, AnalogicalMapping] = {}
|
| 93 |
self.solutions: Dict[str, AnalogicalSolution] = {}
|
| 94 |
|
| 95 |
+
# Performance metrics
|
| 96 |
+
self.performance_metrics = {
|
| 97 |
+
'pattern_matches': 0,
|
| 98 |
+
'successful_mappings': 0,
|
| 99 |
+
'failed_mappings': 0,
|
| 100 |
+
'adaptation_success_rate': 0.0,
|
| 101 |
+
'avg_solution_confidence': 0.0,
|
| 102 |
+
'pattern_distribution': defaultdict(int),
|
| 103 |
+
'mapping_distribution': defaultdict(int),
|
| 104 |
+
'total_patterns_used': 0,
|
| 105 |
+
'total_mappings_created': 0,
|
| 106 |
+
'total_solutions_generated': 0
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
async def reason(
|
| 110 |
+
self,
|
| 111 |
+
query: str,
|
| 112 |
+
context: Dict[str, Any]
|
| 113 |
+
) -> StrategyResult:
|
| 114 |
+
"""
|
| 115 |
+
Apply analogical reasoning to analyze the query.
|
| 116 |
|
| 117 |
+
Args:
|
| 118 |
+
query: The query to reason about
|
| 119 |
+
context: Additional context and parameters
|
| 120 |
+
|
| 121 |
+
Returns:
|
| 122 |
+
StrategyResult containing the reasoning output and metadata
|
| 123 |
+
"""
|
| 124 |
try:
|
| 125 |
+
# Extract patterns
|
| 126 |
patterns = await self._extract_patterns(query, context)
|
| 127 |
+
self.performance_metrics['total_patterns_used'] = len(patterns)
|
| 128 |
|
| 129 |
+
# Find matches
|
| 130 |
matches = await self._find_matches(patterns, context)
|
| 131 |
+
self.performance_metrics['pattern_matches'] = len(matches)
|
| 132 |
|
| 133 |
+
# Create mappings
|
| 134 |
mappings = await self._create_mappings(matches, context)
|
| 135 |
+
self.performance_metrics['total_mappings_created'] = len(mappings)
|
| 136 |
|
| 137 |
+
# Generate solutions
|
| 138 |
solutions = await self._generate_solutions(mappings, context)
|
| 139 |
+
self.performance_metrics['total_solutions_generated'] = len(solutions)
|
| 140 |
|
| 141 |
# Select best solution
|
| 142 |
best_solution = await self._select_best_solution(solutions, context)
|
| 143 |
|
| 144 |
+
if best_solution:
|
| 145 |
+
# Update knowledge base
|
| 146 |
+
self._update_knowledge(patterns, mappings, best_solution)
|
| 147 |
+
|
| 148 |
+
# Update metrics
|
| 149 |
+
self._update_metrics(patterns, mappings, solutions, best_solution)
|
| 150 |
+
|
| 151 |
+
# Build reasoning trace
|
| 152 |
+
reasoning_trace = self._build_reasoning_trace(
|
| 153 |
+
patterns, matches, mappings, solutions, best_solution
|
| 154 |
+
)
|
| 155 |
+
|
| 156 |
+
return StrategyResult(
|
| 157 |
+
strategy_type="analogical",
|
| 158 |
+
success=True,
|
| 159 |
+
answer=best_solution.inference.get('conclusion'),
|
| 160 |
+
confidence=best_solution.confidence,
|
| 161 |
+
reasoning_trace=reasoning_trace,
|
| 162 |
+
metadata={
|
| 163 |
+
'source_analogy': best_solution.source_analogy,
|
| 164 |
+
'mapping_type': best_solution.mapping.type.value,
|
| 165 |
+
'adaptation_details': best_solution.adaptation,
|
| 166 |
+
'validation_results': best_solution.validation
|
| 167 |
+
},
|
| 168 |
+
performance_metrics=self.performance_metrics
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
return StrategyResult(
|
| 172 |
+
strategy_type="analogical",
|
| 173 |
+
success=False,
|
| 174 |
+
answer=None,
|
| 175 |
+
confidence=0.0,
|
| 176 |
+
reasoning_trace=[{
|
| 177 |
+
'step': 'error',
|
| 178 |
+
'error': 'No valid solution found',
|
| 179 |
+
'timestamp': datetime.now().isoformat()
|
| 180 |
+
}],
|
| 181 |
+
metadata={'error': 'No valid solution found'},
|
| 182 |
+
performance_metrics=self.performance_metrics
|
| 183 |
+
)
|
| 184 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
except Exception as e:
|
| 186 |
+
logging.error(f"Analogical reasoning error: {str(e)}")
|
| 187 |
+
return StrategyResult(
|
| 188 |
+
strategy_type="analogical",
|
| 189 |
+
success=False,
|
| 190 |
+
answer=None,
|
| 191 |
+
confidence=0.0,
|
| 192 |
+
reasoning_trace=[{
|
| 193 |
+
'step': 'error',
|
| 194 |
+
'error': str(e),
|
| 195 |
+
'timestamp': datetime.now().isoformat()
|
| 196 |
+
}],
|
| 197 |
+
metadata={'error': str(e)},
|
| 198 |
+
performance_metrics=self.performance_metrics
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
async def _extract_patterns(
|
| 202 |
+
self,
|
| 203 |
+
query: str,
|
| 204 |
+
context: Dict[str, Any]
|
| 205 |
+
) -> List[AnalogicalPattern]:
|
| 206 |
"""Extract patterns from query for analogical matching."""
|
| 207 |
+
# This is a placeholder implementation
|
| 208 |
+
# In practice, this would use more sophisticated pattern extraction
|
| 209 |
+
pattern = AnalogicalPattern(
|
| 210 |
+
id=f"pattern_{len(self.patterns)}",
|
| 211 |
+
level=AnalogicalLevel.SURFACE,
|
| 212 |
+
features={'query': query},
|
| 213 |
+
relations=[],
|
| 214 |
+
constraints=[],
|
| 215 |
+
metadata={'context': context}
|
| 216 |
+
)
|
| 217 |
+
return [pattern]
|
| 218 |
+
|
| 219 |
+
async def _find_matches(
|
| 220 |
+
self,
|
| 221 |
+
patterns: List[AnalogicalPattern],
|
| 222 |
+
context: Dict[str, Any]
|
| 223 |
+
) -> List[Dict[str, Any]]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 224 |
"""Find matching patterns in knowledge base."""
|
| 225 |
+
matches = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 226 |
|
| 227 |
+
for pattern in patterns:
|
| 228 |
+
# Example matching logic
|
| 229 |
+
similarity = np.random.random() # Placeholder
|
| 230 |
+
if similarity >= self.min_similarity:
|
| 231 |
+
matches.append({
|
| 232 |
+
'pattern': pattern,
|
| 233 |
+
'similarity': similarity,
|
| 234 |
+
'features': pattern.features
|
| 235 |
+
})
|
| 236 |
+
|
| 237 |
+
return sorted(
|
| 238 |
+
matches,
|
| 239 |
+
key=lambda x: x['similarity'],
|
| 240 |
+
reverse=True
|
| 241 |
+
)[:self.max_candidates]
|
| 242 |
+
|
| 243 |
+
async def _create_mappings(
|
| 244 |
+
self,
|
| 245 |
+
matches: List[Dict[str, Any]],
|
| 246 |
+
context: Dict[str, Any]
|
| 247 |
+
) -> List[AnalogicalMapping]:
|
| 248 |
"""Create mappings between source and target domains."""
|
| 249 |
+
mappings = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 250 |
|
| 251 |
+
for match in matches:
|
| 252 |
+
mapping = AnalogicalMapping(
|
| 253 |
+
id=f"mapping_{len(self.mappings)}",
|
| 254 |
+
type=MappingType.DIRECT,
|
| 255 |
+
source_elements=match['features'],
|
| 256 |
+
target_elements=context,
|
| 257 |
+
correspondences=[],
|
| 258 |
+
transformations=[],
|
| 259 |
+
confidence=match['similarity']
|
| 260 |
+
)
|
| 261 |
+
mappings.append(mapping)
|
| 262 |
|
| 263 |
+
return mappings
|
| 264 |
+
|
| 265 |
+
async def _generate_solutions(
|
| 266 |
+
self,
|
| 267 |
+
mappings: List[AnalogicalMapping],
|
| 268 |
+
context: Dict[str, Any]
|
| 269 |
+
) -> List[AnalogicalSolution]:
|
| 270 |
"""Generate solutions through analogical transfer."""
|
| 271 |
+
solutions = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 272 |
|
| 273 |
+
for mapping in mappings:
|
| 274 |
+
if mapping.confidence >= self.adaptation_threshold:
|
| 275 |
+
solution = AnalogicalSolution(
|
| 276 |
+
id=f"solution_{len(self.solutions)}",
|
| 277 |
+
source_analogy=str(mapping.source_elements),
|
| 278 |
+
mapping=mapping,
|
| 279 |
+
adaptation={'applied_rules': []},
|
| 280 |
+
inference={'conclusion': 'Analogical solution'},
|
| 281 |
+
confidence=mapping.confidence,
|
| 282 |
+
validation={'checks_passed': True},
|
| 283 |
+
metadata={'context': context}
|
| 284 |
+
)
|
| 285 |
+
solutions.append(solution)
|
| 286 |
|
| 287 |
+
return solutions
|
| 288 |
+
|
| 289 |
+
async def _select_best_solution(
|
| 290 |
+
self,
|
| 291 |
+
solutions: List[AnalogicalSolution],
|
| 292 |
+
context: Dict[str, Any]
|
| 293 |
+
) -> Optional[AnalogicalSolution]:
|
| 294 |
"""Select the best solution based on multiple criteria."""
|
| 295 |
+
if not solutions:
|
| 296 |
+
return None
|
| 297 |
+
|
| 298 |
+
# Sort by confidence and return best
|
| 299 |
+
return max(solutions, key=lambda x: x.confidence)
|
| 300 |
+
|
| 301 |
+
def _update_knowledge(
|
| 302 |
+
self,
|
| 303 |
+
patterns: List[AnalogicalPattern],
|
| 304 |
+
mappings: List[AnalogicalMapping],
|
| 305 |
+
solution: AnalogicalSolution
|
| 306 |
+
) -> None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 307 |
"""Update knowledge base with new patterns and successful mappings."""
|
| 308 |
+
# Store new patterns
|
| 309 |
for pattern in patterns:
|
| 310 |
+
self.patterns[pattern.id] = pattern
|
| 311 |
+
|
| 312 |
+
# Store successful mappings
|
| 313 |
+
for mapping in mappings:
|
| 314 |
+
if mapping.confidence >= self.min_confidence:
|
| 315 |
+
self.mappings[mapping.id] = mapping
|
|
|
|
| 316 |
|
| 317 |
+
# Store successful solution
|
| 318 |
self.solutions[solution.id] = solution
|
| 319 |
+
|
| 320 |
+
def _update_metrics(
|
| 321 |
+
self,
|
| 322 |
+
patterns: List[AnalogicalPattern],
|
| 323 |
+
mappings: List[AnalogicalMapping],
|
| 324 |
+
solutions: List[AnalogicalSolution],
|
| 325 |
+
best_solution: AnalogicalSolution
|
| 326 |
+
) -> None:
|
| 327 |
+
"""Update performance metrics."""
|
| 328 |
+
# Update pattern distribution
|
| 329 |
+
for pattern in patterns:
|
| 330 |
+
self.performance_metrics['pattern_distribution'][pattern.level] += 1
|
| 331 |
+
|
| 332 |
+
# Update mapping distribution
|
| 333 |
+
for mapping in mappings:
|
| 334 |
+
self.performance_metrics['mapping_distribution'][mapping.type] += 1
|
| 335 |
+
if mapping.confidence >= self.min_confidence:
|
| 336 |
+
self.performance_metrics['successful_mappings'] += 1
|
| 337 |
+
else:
|
| 338 |
+
self.performance_metrics['failed_mappings'] += 1
|
| 339 |
+
|
| 340 |
+
# Calculate adaptation success rate
|
| 341 |
+
total_adaptations = len(solutions)
|
| 342 |
+
successful_adaptations = sum(
|
| 343 |
+
1 for s in solutions
|
| 344 |
+
if s.confidence >= self.adaptation_threshold
|
| 345 |
+
)
|
| 346 |
+
self.performance_metrics['adaptation_success_rate'] = (
|
| 347 |
+
successful_adaptations / total_adaptations
|
| 348 |
+
if total_adaptations > 0 else 0.0
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
# Calculate average solution confidence
|
| 352 |
+
self.performance_metrics['avg_solution_confidence'] = (
|
| 353 |
+
sum(s.confidence for s in solutions) / len(solutions)
|
| 354 |
+
if solutions else 0.0
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
def _build_reasoning_trace(
|
| 358 |
+
self,
|
| 359 |
+
patterns: List[AnalogicalPattern],
|
| 360 |
+
matches: List[Dict[str, Any]],
|
| 361 |
+
mappings: List[AnalogicalMapping],
|
| 362 |
+
solutions: List[AnalogicalSolution],
|
| 363 |
+
best_solution: AnalogicalSolution
|
| 364 |
+
) -> List[Dict[str, Any]]:
|
| 365 |
+
"""Build the reasoning trace for the solution."""
|
| 366 |
+
trace = []
|
| 367 |
+
|
| 368 |
+
# Pattern extraction step
|
| 369 |
+
trace.append({
|
| 370 |
+
'step': 'pattern_extraction',
|
| 371 |
+
'patterns': [self._pattern_to_dict(p) for p in patterns],
|
| 372 |
+
'timestamp': datetime.now().isoformat()
|
| 373 |
})
|
| 374 |
|
| 375 |
+
# Pattern matching step
|
| 376 |
+
trace.append({
|
| 377 |
+
'step': 'pattern_matching',
|
| 378 |
+
'matches': matches,
|
| 379 |
+
'timestamp': datetime.now().isoformat()
|
|
|
|
| 380 |
})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 381 |
|
| 382 |
+
# Mapping creation step
|
| 383 |
+
trace.append({
|
| 384 |
+
'step': 'mapping_creation',
|
| 385 |
+
'mappings': [self._mapping_to_dict(m) for m in mappings],
|
| 386 |
+
'timestamp': datetime.now().isoformat()
|
| 387 |
+
})
|
| 388 |
|
| 389 |
+
# Solution generation step
|
| 390 |
+
trace.append({
|
| 391 |
+
'step': 'solution_generation',
|
| 392 |
+
'solutions': [self._solution_to_dict(s) for s in solutions],
|
| 393 |
+
'timestamp': datetime.now().isoformat()
|
| 394 |
+
})
|
|
|
|
|
|
|
|
|
|
| 395 |
|
| 396 |
+
# Best solution selection step
|
| 397 |
+
trace.append({
|
| 398 |
+
'step': 'solution_selection',
|
| 399 |
+
'selected_solution': self._solution_to_dict(best_solution),
|
| 400 |
+
'timestamp': datetime.now().isoformat()
|
| 401 |
+
})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 402 |
|
| 403 |
+
return trace
|
| 404 |
+
|
| 405 |
def _pattern_to_dict(self, pattern: AnalogicalPattern) -> Dict[str, Any]:
|
| 406 |
"""Convert pattern to dictionary for serialization."""
|
| 407 |
return {
|
| 408 |
+
'id': pattern.id,
|
| 409 |
+
'level': pattern.level.value,
|
| 410 |
+
'features': pattern.features,
|
| 411 |
+
'relations': pattern.relations,
|
| 412 |
+
'constraints': pattern.constraints,
|
| 413 |
+
'metadata': pattern.metadata,
|
| 414 |
+
'timestamp': pattern.timestamp
|
| 415 |
}
|
| 416 |
+
|
| 417 |
def _mapping_to_dict(self, mapping: AnalogicalMapping) -> Dict[str, Any]:
|
| 418 |
"""Convert mapping to dictionary for serialization."""
|
| 419 |
return {
|
| 420 |
+
'id': mapping.id,
|
| 421 |
+
'type': mapping.type.value,
|
| 422 |
+
'source_elements': mapping.source_elements,
|
| 423 |
+
'target_elements': mapping.target_elements,
|
| 424 |
+
'correspondences': mapping.correspondences,
|
| 425 |
+
'transformations': mapping.transformations,
|
| 426 |
+
'confidence': mapping.confidence,
|
| 427 |
+
'timestamp': mapping.timestamp
|
| 428 |
}
|
| 429 |
+
|
| 430 |
def _solution_to_dict(self, solution: AnalogicalSolution) -> Dict[str, Any]:
|
| 431 |
"""Convert solution to dictionary for serialization."""
|
| 432 |
return {
|
| 433 |
+
'id': solution.id,
|
| 434 |
+
'source_analogy': solution.source_analogy,
|
| 435 |
+
'mapping': self._mapping_to_dict(solution.mapping),
|
| 436 |
+
'adaptation': solution.adaptation,
|
| 437 |
+
'inference': solution.inference,
|
| 438 |
+
'confidence': solution.confidence,
|
| 439 |
+
'validation': solution.validation,
|
| 440 |
+
'metadata': solution.metadata,
|
| 441 |
+
'timestamp': solution.timestamp
|
| 442 |
}
|
| 443 |
+
|
| 444 |
+
def clear_knowledge_base(self) -> None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 445 |
"""Clear the knowledge base."""
|
| 446 |
self.patterns.clear()
|
| 447 |
self.mappings.clear()
|
| 448 |
self.solutions.clear()
|
| 449 |
+
|
| 450 |
+
# Reset performance metrics
|
| 451 |
+
self.performance_metrics.update({
|
| 452 |
+
'pattern_matches': 0,
|
| 453 |
+
'successful_mappings': 0,
|
| 454 |
+
'failed_mappings': 0,
|
| 455 |
+
'adaptation_success_rate': 0.0,
|
| 456 |
+
'avg_solution_confidence': 0.0,
|
| 457 |
+
'pattern_distribution': defaultdict(int),
|
| 458 |
+
'mapping_distribution': defaultdict(int),
|
| 459 |
+
'total_patterns_used': 0,
|
| 460 |
+
'total_mappings_created': 0,
|
| 461 |
+
'total_solutions_generated': 0
|
| 462 |
+
})
|
reasoning/multimodal.py
CHANGED
|
@@ -9,7 +9,15 @@ from datetime import datetime
|
|
| 9 |
import numpy as np
|
| 10 |
from collections import defaultdict
|
| 11 |
|
| 12 |
-
from .base import ReasoningStrategy
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
@dataclass
|
| 15 |
class ModalityFeatures:
|
|
@@ -19,8 +27,19 @@ class ModalityFeatures:
|
|
| 19 |
audio: Optional[List[Dict[str, Any]]] = None
|
| 20 |
video: Optional[List[Dict[str, Any]]] = None
|
| 21 |
structured: Optional[List[Dict[str, Any]]] = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
-
class
|
| 24 |
"""
|
| 25 |
Advanced multimodal reasoning that:
|
| 26 |
1. Processes different types of information
|
|
@@ -37,41 +56,35 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 37 |
|
| 38 |
# Standard reasoning parameters
|
| 39 |
self.min_confidence = self.config.get('min_confidence', 0.7)
|
| 40 |
-
self.
|
| 41 |
-
self.learning_rate = self.config.get('learning_rate', 0.1)
|
| 42 |
-
self.strategy_weights = self.config.get('strategy_weights', {
|
| 43 |
-
"LOCAL_LLM": 0.8,
|
| 44 |
-
"CHAIN_OF_THOUGHT": 0.6,
|
| 45 |
-
"TREE_OF_THOUGHTS": 0.5,
|
| 46 |
-
"META_LEARNING": 0.4
|
| 47 |
-
})
|
| 48 |
-
|
| 49 |
-
# Configure model repositories
|
| 50 |
-
self.models = self.config.get('models', {
|
| 51 |
-
'img2img': {
|
| 52 |
-
'repo_id': 'enhanceaiteam/Flux-Uncensored-V2',
|
| 53 |
-
'filename': 'Flux-Uncensored-V2.safetensors'
|
| 54 |
-
},
|
| 55 |
-
'img2vid': {
|
| 56 |
-
'repo_id': 'stabilityai/stable-video-diffusion-img2vid-xt',
|
| 57 |
-
'filename': 'svd_xt.safetensors'
|
| 58 |
-
},
|
| 59 |
-
'any2any': {
|
| 60 |
-
'repo_id': 'deepseek-ai/JanusFlow-1.3B',
|
| 61 |
-
'filename': 'janusflow-1.3b.safetensors'
|
| 62 |
-
}
|
| 63 |
-
})
|
| 64 |
|
| 65 |
# Configure modality weights
|
| 66 |
self.weights = self.config.get('modality_weights', {
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
-
async def reason(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
"""
|
| 76 |
Apply multimodal reasoning to process and integrate different types of information.
|
| 77 |
|
|
@@ -80,35 +93,78 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 80 |
context: Additional context and parameters
|
| 81 |
|
| 82 |
Returns:
|
| 83 |
-
|
| 84 |
"""
|
| 85 |
try:
|
| 86 |
# Process across modalities
|
| 87 |
modalities = await self._process_modalities(query, context)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
# Align cross-modal information
|
| 90 |
-
|
|
|
|
| 91 |
|
| 92 |
# Integrate aligned information
|
| 93 |
-
integration = await self._integrated_analysis(
|
|
|
|
| 94 |
|
| 95 |
# Generate final response
|
| 96 |
response = await self._generate_response(integration, context)
|
| 97 |
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
|
| 106 |
except Exception as e:
|
| 107 |
-
logging.error(f"Multimodal reasoning
|
| 108 |
-
return
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
async def _process_modalities(
|
| 114 |
self,
|
|
@@ -120,23 +176,28 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 120 |
|
| 121 |
# Process text
|
| 122 |
if 'text' in context:
|
| 123 |
-
modalities[
|
|
|
|
| 124 |
|
| 125 |
# Process images
|
| 126 |
if 'images' in context:
|
| 127 |
-
modalities[
|
|
|
|
| 128 |
|
| 129 |
# Process audio
|
| 130 |
if 'audio' in context:
|
| 131 |
-
modalities[
|
|
|
|
| 132 |
|
| 133 |
# Process video
|
| 134 |
if 'video' in context:
|
| 135 |
-
modalities[
|
|
|
|
| 136 |
|
| 137 |
# Process structured data
|
| 138 |
if 'structured' in context:
|
| 139 |
-
modalities[
|
|
|
|
| 140 |
|
| 141 |
return modalities
|
| 142 |
|
|
@@ -144,7 +205,7 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 144 |
self,
|
| 145 |
modalities: Dict[str, List[Dict[str, Any]]],
|
| 146 |
context: Dict[str, Any]
|
| 147 |
-
) -> List[
|
| 148 |
"""Align information across different modalities."""
|
| 149 |
alignments = []
|
| 150 |
|
|
@@ -163,14 +224,25 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 163 |
for item1 in items1:
|
| 164 |
for item2 in items2:
|
| 165 |
similarity = self._calculate_similarity(item1, item2)
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
return alignments
|
| 176 |
|
|
@@ -194,7 +266,7 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 194 |
|
| 195 |
async def _integrated_analysis(
|
| 196 |
self,
|
| 197 |
-
|
| 198 |
context: Dict[str, Any]
|
| 199 |
) -> List[Dict[str, Any]]:
|
| 200 |
"""Perform integrated analysis of aligned information."""
|
|
@@ -202,8 +274,8 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 202 |
|
| 203 |
# Group alignments by similarity
|
| 204 |
similarity_groups = defaultdict(list)
|
| 205 |
-
for align in
|
| 206 |
-
similarity_groups[align
|
| 207 |
|
| 208 |
# Process groups in order of similarity
|
| 209 |
for similarity, group in sorted(
|
|
@@ -215,11 +287,15 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 215 |
for align in group:
|
| 216 |
integrated.append({
|
| 217 |
'features': {
|
| 218 |
-
**align
|
| 219 |
-
**align
|
| 220 |
},
|
| 221 |
-
'modalities': [
|
| 222 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
})
|
| 224 |
|
| 225 |
return integrated
|
|
@@ -252,7 +328,8 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 252 |
|
| 253 |
return {
|
| 254 |
'text': "\n".join(response_text),
|
| 255 |
-
'confidence': confidence
|
|
|
|
| 256 |
}
|
| 257 |
|
| 258 |
def _calculate_confidence(self, integration: List[Dict[str, Any]]) -> float:
|
|
@@ -278,28 +355,89 @@ class MultiModalReasoning(ReasoningStrategy):
|
|
| 278 |
confidence += avg_similarity * 0.2
|
| 279 |
|
| 280 |
return min(confidence, 1.0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 281 |
|
| 282 |
def _process_text(self, text: str) -> List[Dict[str, Any]]:
|
| 283 |
"""Process text modality."""
|
| 284 |
# Simple text processing for now
|
| 285 |
-
return [{'text': text}]
|
| 286 |
|
| 287 |
def _process_images(self, images: List[str]) -> List[Dict[str, Any]]:
|
| 288 |
"""Process image modality."""
|
| 289 |
# Simple image processing for now
|
| 290 |
-
return [{
|
|
|
|
|
|
|
|
|
|
| 291 |
|
| 292 |
def _process_audio(self, audio: List[str]) -> List[Dict[str, Any]]:
|
| 293 |
"""Process audio modality."""
|
| 294 |
# Simple audio processing for now
|
| 295 |
-
return [{
|
|
|
|
|
|
|
|
|
|
| 296 |
|
| 297 |
def _process_video(self, video: List[str]) -> List[Dict[str, Any]]:
|
| 298 |
"""Process video modality."""
|
| 299 |
# Simple video processing for now
|
| 300 |
-
return [{
|
|
|
|
|
|
|
|
|
|
| 301 |
|
| 302 |
def _process_structured(self, structured: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 303 |
"""Process structured data modality."""
|
| 304 |
# Simple structured data processing for now
|
| 305 |
-
return [{
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
import numpy as np
|
| 10 |
from collections import defaultdict
|
| 11 |
|
| 12 |
+
from .base import ReasoningStrategy, StrategyResult
|
| 13 |
+
|
| 14 |
+
class ModalityType(Enum):
|
| 15 |
+
"""Types of modalities supported."""
|
| 16 |
+
TEXT = "text"
|
| 17 |
+
IMAGE = "image"
|
| 18 |
+
AUDIO = "audio"
|
| 19 |
+
VIDEO = "video"
|
| 20 |
+
STRUCTURED = "structured"
|
| 21 |
|
| 22 |
@dataclass
|
| 23 |
class ModalityFeatures:
|
|
|
|
| 27 |
audio: Optional[List[Dict[str, Any]]] = None
|
| 28 |
video: Optional[List[Dict[str, Any]]] = None
|
| 29 |
structured: Optional[List[Dict[str, Any]]] = None
|
| 30 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 31 |
+
|
| 32 |
+
@dataclass
|
| 33 |
+
class ModalityAlignment:
|
| 34 |
+
"""Alignment between different modalities."""
|
| 35 |
+
modality1: ModalityType
|
| 36 |
+
modality2: ModalityType
|
| 37 |
+
features1: Dict[str, Any]
|
| 38 |
+
features2: Dict[str, Any]
|
| 39 |
+
similarity: float
|
| 40 |
+
timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
|
| 41 |
|
| 42 |
+
class MultimodalStrategy(ReasoningStrategy):
|
| 43 |
"""
|
| 44 |
Advanced multimodal reasoning that:
|
| 45 |
1. Processes different types of information
|
|
|
|
| 56 |
|
| 57 |
# Standard reasoning parameters
|
| 58 |
self.min_confidence = self.config.get('min_confidence', 0.7)
|
| 59 |
+
self.min_similarity = self.config.get('min_similarity', 0.7)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
# Configure modality weights
|
| 62 |
self.weights = self.config.get('modality_weights', {
|
| 63 |
+
ModalityType.TEXT.value: 0.4,
|
| 64 |
+
ModalityType.IMAGE.value: 0.3,
|
| 65 |
+
ModalityType.AUDIO.value: 0.1,
|
| 66 |
+
ModalityType.VIDEO.value: 0.1,
|
| 67 |
+
ModalityType.STRUCTURED.value: 0.1
|
| 68 |
})
|
| 69 |
+
|
| 70 |
+
# Performance metrics
|
| 71 |
+
self.performance_metrics = {
|
| 72 |
+
'processed_modalities': defaultdict(int),
|
| 73 |
+
'alignments_found': 0,
|
| 74 |
+
'successful_alignments': 0,
|
| 75 |
+
'failed_alignments': 0,
|
| 76 |
+
'avg_similarity': 0.0,
|
| 77 |
+
'modality_distribution': defaultdict(int),
|
| 78 |
+
'total_features_extracted': 0,
|
| 79 |
+
'total_alignments_created': 0,
|
| 80 |
+
'total_integrations': 0
|
| 81 |
+
}
|
| 82 |
|
| 83 |
+
async def reason(
|
| 84 |
+
self,
|
| 85 |
+
query: str,
|
| 86 |
+
context: Dict[str, Any]
|
| 87 |
+
) -> StrategyResult:
|
| 88 |
"""
|
| 89 |
Apply multimodal reasoning to process and integrate different types of information.
|
| 90 |
|
|
|
|
| 93 |
context: Additional context and parameters
|
| 94 |
|
| 95 |
Returns:
|
| 96 |
+
StrategyResult containing the reasoning output and metadata
|
| 97 |
"""
|
| 98 |
try:
|
| 99 |
# Process across modalities
|
| 100 |
modalities = await self._process_modalities(query, context)
|
| 101 |
+
self.performance_metrics['total_features_extracted'] = sum(
|
| 102 |
+
len(features) for features in modalities.values()
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
# Update modality distribution
|
| 106 |
+
for modality, features in modalities.items():
|
| 107 |
+
self.performance_metrics['modality_distribution'][modality] += len(features)
|
| 108 |
|
| 109 |
# Align cross-modal information
|
| 110 |
+
alignments = await self._cross_modal_alignment(modalities, context)
|
| 111 |
+
self.performance_metrics['total_alignments_created'] = len(alignments)
|
| 112 |
|
| 113 |
# Integrate aligned information
|
| 114 |
+
integration = await self._integrated_analysis(alignments, context)
|
| 115 |
+
self.performance_metrics['total_integrations'] = len(integration)
|
| 116 |
|
| 117 |
# Generate final response
|
| 118 |
response = await self._generate_response(integration, context)
|
| 119 |
|
| 120 |
+
# Build reasoning trace
|
| 121 |
+
reasoning_trace = self._build_reasoning_trace(
|
| 122 |
+
modalities, alignments, integration, response
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
# Calculate final confidence
|
| 126 |
+
confidence = self._calculate_confidence(integration)
|
| 127 |
+
|
| 128 |
+
if confidence >= self.min_confidence:
|
| 129 |
+
return StrategyResult(
|
| 130 |
+
strategy_type="multimodal",
|
| 131 |
+
success=True,
|
| 132 |
+
answer=response.get('text'),
|
| 133 |
+
confidence=confidence,
|
| 134 |
+
reasoning_trace=reasoning_trace,
|
| 135 |
+
metadata={
|
| 136 |
+
'modalities': list(modalities.keys()),
|
| 137 |
+
'alignments': len(alignments),
|
| 138 |
+
'integration_size': len(integration)
|
| 139 |
+
},
|
| 140 |
+
performance_metrics=self.performance_metrics
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
return StrategyResult(
|
| 144 |
+
strategy_type="multimodal",
|
| 145 |
+
success=False,
|
| 146 |
+
answer=None,
|
| 147 |
+
confidence=confidence,
|
| 148 |
+
reasoning_trace=reasoning_trace,
|
| 149 |
+
metadata={'error': 'Insufficient confidence in results'},
|
| 150 |
+
performance_metrics=self.performance_metrics
|
| 151 |
+
)
|
| 152 |
|
| 153 |
except Exception as e:
|
| 154 |
+
logging.error(f"Multimodal reasoning error: {str(e)}")
|
| 155 |
+
return StrategyResult(
|
| 156 |
+
strategy_type="multimodal",
|
| 157 |
+
success=False,
|
| 158 |
+
answer=None,
|
| 159 |
+
confidence=0.0,
|
| 160 |
+
reasoning_trace=[{
|
| 161 |
+
'step': 'error',
|
| 162 |
+
'error': str(e),
|
| 163 |
+
'timestamp': datetime.now().isoformat()
|
| 164 |
+
}],
|
| 165 |
+
metadata={'error': str(e)},
|
| 166 |
+
performance_metrics=self.performance_metrics
|
| 167 |
+
)
|
| 168 |
|
| 169 |
async def _process_modalities(
|
| 170 |
self,
|
|
|
|
| 176 |
|
| 177 |
# Process text
|
| 178 |
if 'text' in context:
|
| 179 |
+
modalities[ModalityType.TEXT.value] = self._process_text(context['text'])
|
| 180 |
+
self.performance_metrics['processed_modalities'][ModalityType.TEXT.value] += 1
|
| 181 |
|
| 182 |
# Process images
|
| 183 |
if 'images' in context:
|
| 184 |
+
modalities[ModalityType.IMAGE.value] = self._process_images(context['images'])
|
| 185 |
+
self.performance_metrics['processed_modalities'][ModalityType.IMAGE.value] += 1
|
| 186 |
|
| 187 |
# Process audio
|
| 188 |
if 'audio' in context:
|
| 189 |
+
modalities[ModalityType.AUDIO.value] = self._process_audio(context['audio'])
|
| 190 |
+
self.performance_metrics['processed_modalities'][ModalityType.AUDIO.value] += 1
|
| 191 |
|
| 192 |
# Process video
|
| 193 |
if 'video' in context:
|
| 194 |
+
modalities[ModalityType.VIDEO.value] = self._process_video(context['video'])
|
| 195 |
+
self.performance_metrics['processed_modalities'][ModalityType.VIDEO.value] += 1
|
| 196 |
|
| 197 |
# Process structured data
|
| 198 |
if 'structured' in context:
|
| 199 |
+
modalities[ModalityType.STRUCTURED.value] = self._process_structured(context['structured'])
|
| 200 |
+
self.performance_metrics['processed_modalities'][ModalityType.STRUCTURED.value] += 1
|
| 201 |
|
| 202 |
return modalities
|
| 203 |
|
|
|
|
| 205 |
self,
|
| 206 |
modalities: Dict[str, List[Dict[str, Any]]],
|
| 207 |
context: Dict[str, Any]
|
| 208 |
+
) -> List[ModalityAlignment]:
|
| 209 |
"""Align information across different modalities."""
|
| 210 |
alignments = []
|
| 211 |
|
|
|
|
| 224 |
for item1 in items1:
|
| 225 |
for item2 in items2:
|
| 226 |
similarity = self._calculate_similarity(item1, item2)
|
| 227 |
+
self.performance_metrics['alignments_found'] += 1
|
| 228 |
+
|
| 229 |
+
if similarity >= self.min_similarity:
|
| 230 |
+
self.performance_metrics['successful_alignments'] += 1
|
| 231 |
+
alignments.append(ModalityAlignment(
|
| 232 |
+
modality1=ModalityType(mod1),
|
| 233 |
+
modality2=ModalityType(mod2),
|
| 234 |
+
features1=item1,
|
| 235 |
+
features2=item2,
|
| 236 |
+
similarity=similarity
|
| 237 |
+
))
|
| 238 |
+
else:
|
| 239 |
+
self.performance_metrics['failed_alignments'] += 1
|
| 240 |
+
|
| 241 |
+
# Update average similarity
|
| 242 |
+
if alignments:
|
| 243 |
+
self.performance_metrics['avg_similarity'] = (
|
| 244 |
+
sum(a.similarity for a in alignments) / len(alignments)
|
| 245 |
+
)
|
| 246 |
|
| 247 |
return alignments
|
| 248 |
|
|
|
|
| 266 |
|
| 267 |
async def _integrated_analysis(
|
| 268 |
self,
|
| 269 |
+
alignments: List[ModalityAlignment],
|
| 270 |
context: Dict[str, Any]
|
| 271 |
) -> List[Dict[str, Any]]:
|
| 272 |
"""Perform integrated analysis of aligned information."""
|
|
|
|
| 274 |
|
| 275 |
# Group alignments by similarity
|
| 276 |
similarity_groups = defaultdict(list)
|
| 277 |
+
for align in alignments:
|
| 278 |
+
similarity_groups[align.similarity].append(align)
|
| 279 |
|
| 280 |
# Process groups in order of similarity
|
| 281 |
for similarity, group in sorted(
|
|
|
|
| 287 |
for align in group:
|
| 288 |
integrated.append({
|
| 289 |
'features': {
|
| 290 |
+
**align.features1,
|
| 291 |
+
**align.features2
|
| 292 |
},
|
| 293 |
+
'modalities': [
|
| 294 |
+
align.modality1.value,
|
| 295 |
+
align.modality2.value
|
| 296 |
+
],
|
| 297 |
+
'confidence': align.similarity,
|
| 298 |
+
'timestamp': align.timestamp
|
| 299 |
})
|
| 300 |
|
| 301 |
return integrated
|
|
|
|
| 328 |
|
| 329 |
return {
|
| 330 |
'text': "\n".join(response_text),
|
| 331 |
+
'confidence': confidence,
|
| 332 |
+
'timestamp': datetime.now().isoformat()
|
| 333 |
}
|
| 334 |
|
| 335 |
def _calculate_confidence(self, integration: List[Dict[str, Any]]) -> float:
|
|
|
|
| 355 |
confidence += avg_similarity * 0.2
|
| 356 |
|
| 357 |
return min(confidence, 1.0)
|
| 358 |
+
|
| 359 |
+
def _build_reasoning_trace(
|
| 360 |
+
self,
|
| 361 |
+
modalities: Dict[str, List[Dict[str, Any]]],
|
| 362 |
+
alignments: List[ModalityAlignment],
|
| 363 |
+
integration: List[Dict[str, Any]],
|
| 364 |
+
response: Dict[str, Any]
|
| 365 |
+
) -> List[Dict[str, Any]]:
|
| 366 |
+
"""Build the reasoning trace for multimodal processing."""
|
| 367 |
+
trace = []
|
| 368 |
+
|
| 369 |
+
# Modality processing step
|
| 370 |
+
trace.append({
|
| 371 |
+
'step': 'modality_processing',
|
| 372 |
+
'modalities': {
|
| 373 |
+
mod: len(features)
|
| 374 |
+
for mod, features in modalities.items()
|
| 375 |
+
},
|
| 376 |
+
'timestamp': datetime.now().isoformat()
|
| 377 |
+
})
|
| 378 |
+
|
| 379 |
+
# Alignment step
|
| 380 |
+
trace.append({
|
| 381 |
+
'step': 'cross_modal_alignment',
|
| 382 |
+
'alignments': [
|
| 383 |
+
{
|
| 384 |
+
'modalities': [a.modality1.value, a.modality2.value],
|
| 385 |
+
'similarity': a.similarity
|
| 386 |
+
}
|
| 387 |
+
for a in alignments
|
| 388 |
+
],
|
| 389 |
+
'timestamp': datetime.now().isoformat()
|
| 390 |
+
})
|
| 391 |
+
|
| 392 |
+
# Integration step
|
| 393 |
+
trace.append({
|
| 394 |
+
'step': 'integration',
|
| 395 |
+
'integrated_items': len(integration),
|
| 396 |
+
'timestamp': datetime.now().isoformat()
|
| 397 |
+
})
|
| 398 |
+
|
| 399 |
+
# Response generation step
|
| 400 |
+
trace.append({
|
| 401 |
+
'step': 'response_generation',
|
| 402 |
+
'response': response,
|
| 403 |
+
'timestamp': datetime.now().isoformat()
|
| 404 |
+
})
|
| 405 |
+
|
| 406 |
+
return trace
|
| 407 |
|
| 408 |
def _process_text(self, text: str) -> List[Dict[str, Any]]:
|
| 409 |
"""Process text modality."""
|
| 410 |
# Simple text processing for now
|
| 411 |
+
return [{'text': text, 'timestamp': datetime.now().isoformat()}]
|
| 412 |
|
| 413 |
def _process_images(self, images: List[str]) -> List[Dict[str, Any]]:
|
| 414 |
"""Process image modality."""
|
| 415 |
# Simple image processing for now
|
| 416 |
+
return [{
|
| 417 |
+
'image': image,
|
| 418 |
+
'timestamp': datetime.now().isoformat()
|
| 419 |
+
} for image in images]
|
| 420 |
|
| 421 |
def _process_audio(self, audio: List[str]) -> List[Dict[str, Any]]:
|
| 422 |
"""Process audio modality."""
|
| 423 |
# Simple audio processing for now
|
| 424 |
+
return [{
|
| 425 |
+
'audio': audio_file,
|
| 426 |
+
'timestamp': datetime.now().isoformat()
|
| 427 |
+
} for audio_file in audio]
|
| 428 |
|
| 429 |
def _process_video(self, video: List[str]) -> List[Dict[str, Any]]:
|
| 430 |
"""Process video modality."""
|
| 431 |
# Simple video processing for now
|
| 432 |
+
return [{
|
| 433 |
+
'video': video_file,
|
| 434 |
+
'timestamp': datetime.now().isoformat()
|
| 435 |
+
} for video_file in video]
|
| 436 |
|
| 437 |
def _process_structured(self, structured: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 438 |
"""Process structured data modality."""
|
| 439 |
# Simple structured data processing for now
|
| 440 |
+
return [{
|
| 441 |
+
'structured': structured,
|
| 442 |
+
'timestamp': datetime.now().isoformat()
|
| 443 |
+
}]
|