Upload 10 files
Browse files- aee_bias_detector.py +81 -0
- aee_core_classes_era.py +73 -0
- aee_era_main.py +178 -0
- aee_explainer_era.py +100 -0
- aee_extractor_era.py +254 -0
- aee_linker_era.py +153 -0
- aee_updater.py +188 -0
- aee_updater_era.py +129 -0
- aee_utils.py +41 -0
- aee_validator.py +97 -0
aee_bias_detector.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_bias_detector.py
|
2 |
+
# AEE v3.0: Bilgi tabanındaki potansiyel yanlılıkları sezmek için sezgisel yöntemler uygular.
|
3 |
+
# v3.0.4 (Era): Import düzeltildi, Source Diversity'de initial_confidence kullanıldı, Arg Balance check basitleştirildi.
|
4 |
+
|
5 |
+
from typing import Dict, List, Optional, Counter
|
6 |
+
from collections import defaultdict, Counter
|
7 |
+
|
8 |
+
# Era sürümündeki DOĞRU sınıfları import et
|
9 |
+
try:
|
10 |
+
from aee_core_classes_era import Proposition, EpistemicData
|
11 |
+
except ImportError:
|
12 |
+
print("Bias Detector Error: Could not import from aee_core_classes_era.py.")
|
13 |
+
Proposition = None; EpistemicData = None
|
14 |
+
|
# --- Bias detection functions ---
def detect_source_diversity_bias(kb: Dict[str, Proposition], subject_threshold: int = 2, confidence_threshold: float = 0.6, diversity_threshold: int = 2):
    """Flag high-confidence propositions about a subject whose evidence comes
    from too few distinct source types (SOURCE_MONOCULTURE).

    Only subjects with at least ``subject_threshold`` propositions are
    examined; a subject is flagged when its confident propositions span fewer
    than ``diversity_threshold`` source types.
    """
    if not Proposition:
        return
    print(f" Running Source Diversity Check...")
    # Bucket proposition ids by their subject lemma.
    by_subject = defaultdict(list)
    for pid, prop in kb.items():
        if prop.subject_lemma:
            by_subject[prop.subject_lemma].append(pid)
    n_flagged = 0
    for subject, pids in by_subject.items():
        if len(pids) < subject_threshold:
            continue
        confident = []
        seen_types = set()
        for pid in pids:
            prop = kb.get(pid)
            if prop is None:
                continue
            # initial_confidence is used here because this check runs before
            # the update pass has produced final computed confidences.
            conf = prop.epistemic_data.initial_confidence
            if conf is not None and conf >= confidence_threshold:
                confident.append(prop)
                kind = prop.epistemic_data.source_type  # may be None
                seen_types.add(kind if kind else "unknown_type")
        if confident and len(seen_types) < diversity_threshold:
            bias_flag = "SOURCE_MONOCULTURE"
            print(f" Potential Bias Detected: Subject '{subject}' low diversity ({len(seen_types)}<{diversity_threshold}). Flagging {len(confident)} props.")
            for prop in confident:
                if bias_flag not in prop.epistemic_data.bias_flags:
                    prop.epistemic_data.bias_flags.append(bias_flag)
                    n_flagged += 1
    print(f" Source Diversity Check complete. Flagged {n_flagged} propositions.")
def detect_argument_balance_bias(kb: Dict[str, Proposition], confidence_threshold: float = 0.7):
    """Flag confident propositions that carry only supporting links and no
    contradicting links (POTENTIAL_UNBALANCED_ARG)."""
    if not Proposition:
        return
    print(f" Running Argument Balance Check...")
    bias_flag = "POTENTIAL_UNBALANCED_ARG"
    n_flagged = 0
    for prop in kb.values():
        ep = prop.epistemic_data
        # Simplified check on computed_confidence: the supports list is
        # non-empty while the contradicts list is empty.
        is_confident = ep.computed_confidence is not None and ep.computed_confidence >= confidence_threshold
        if is_confident and ep.supports and not ep.contradicts:
            if bias_flag not in ep.bias_flags:
                ep.bias_flags.append(bias_flag)
                n_flagged += 1
    print(f" Argument Balance Check complete. Flagged {n_flagged} propositions.")
def run_bias_detection_v3(kb: Dict[str, Proposition]):
    """Run every v3 bias-detection heuristic over the knowledge base."""
    if not Proposition:
        print("Error: Cannot run bias detection...")
        return
    print("\nRunning v3 Bias Detection Heuristics...")
    if not kb:
        print(" Skipping bias detection as Knowledge Base is empty.")
    else:
        detect_source_diversity_bias(kb)
        detect_argument_balance_bias(kb)
    print("Bias Detection Heuristics complete.")
# --- Test block ---
if __name__ == "__main__":
    print("Testing AEE Bias Detector Module (v3.0.4 - Reviewed)...")
    if Proposition and EpistemicData:
        print("Creating a mock Knowledge Base for bias testing...")
        kb_test: Dict[str, Proposition] = {}

        def _add_prop(text: str, ed, subj: str, rel: str, val: str):
            """Build a mock Proposition and register it in kb_test.

            Bug fix: the lemmas must be passed by keyword — in the dataclass
            field order prop_id precedes the lemma fields, so the previous
            positional calls stored the subject lemma in prop_id and shifted
            every other lemma by one.
            """
            prop = Proposition(text, text, ed, subject_lemma=subj, relation_lemma=rel, value_lemma=val)
            kb_test[prop.prop_id] = prop
            return prop

        _add_prop("AI is beneficial", EpistemicData(source_id="techblog1", source_type="blog", initial_confidence=0.8, computed_confidence=0.85), "ai", "be", "beneficial")
        _add_prop("AI improves efficiency", EpistemicData(source_id="techblog2", source_type="blog", initial_confidence=0.7, computed_confidence=0.75), "ai", "improve", "efficiency")
        _add_prop("AI creates jobs", EpistemicData(source_id="techblog3", source_type="blog", initial_confidence=0.9, computed_confidence=0.92), "ai", "create", "job")
        _add_prop("warming is real", EpistemicData(source_id="science.org", source_type="scientific_paper", initial_confidence=0.95, computed_confidence=0.96), "warming", "be", "real")
        _add_prop("warming is accelerated", EpistemicData(source_id="news.com", source_type="news", initial_confidence=0.7, computed_confidence=0.72), "warming", "be", "accelerated")
        p_safe = _add_prop("System X is safe", EpistemicData(source_id="safety_report", source_type="report", initial_confidence=0.9, computed_confidence=0.91), "system x", "be", "safe")
        p_support = _add_prop("System X passed tests", EpistemicData(source_id="internal_memo", source_type="memo", initial_confidence=0.6, computed_confidence=0.65), "system x", "pass", "test")
        p_safe.epistemic_data.supports.append(p_support.prop_id)
        print(f"Mock KB created with {len(kb_test)} propositions.")

        run_bias_detection_v3(kb_test)

        print("\n--- Final KB State (Bias Detector Test) ---")
        # Plain loop instead of a list comprehension used for side effects.
        for pid, p in kb_test.items():
            print(f"ID: {pid[:8]} | Subj: {p.subject_lemma} | InitConf: {p.epistemic_data.initial_confidence:.2f} | Bias: {p.epistemic_data.bias_flags}")
    else:
        print("Could not run tests because class import failed.")
    print("\nBias Detector module testing complete.")
aee_core_classes_era.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_core_classes_era.py
|
2 |
+
# AEE Projesi için temel veri yapılarını tanımlar.
|
3 |
+
# Era Sürümü: EpistemicData'ya plausibility eklendi.
|
4 |
+
|
5 |
+
import uuid
|
6 |
+
from dataclasses import dataclass, field
|
7 |
+
from datetime import datetime
|
8 |
+
from typing import List, Optional, Dict, Any
|
9 |
+
|
@dataclass
class EpistemicData:
    """Holds the epistemic bookkeeping attached to a single proposition."""
    source_id: str  # identifier of the originating source
    timestamp: datetime = field(default_factory=datetime.now)
    initial_confidence: float = 0.5  # confidence assigned at extraction time
    # None means "not supplied"; __post_init__ then seeds it from
    # initial_confidence.  (Bug fix: the previous version unconditionally
    # overwrote computed_confidence with initial_confidence in __post_init__,
    # silently discarding any value the caller passed explicitly.)
    computed_confidence: Optional[float] = None
    source_type: Optional[str] = None
    reliability_score: Optional[float] = None

    # v2+ links to other propositions (lists of prop_id strings)
    supports: List[str] = field(default_factory=list)
    contradicts: List[str] = field(default_factory=list)

    # v3+ flags
    bias_flags: List[str] = field(default_factory=list)

    # Era fields:
    plausibility_score: Optional[float] = None  # overall plausibility (e.g. 0.0-1.0)
    validation_notes: List[str] = field(default_factory=list)  # notes from the plausibility check (e.g. ['Contradicts common sense'])

    other_metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Only seed computed_confidence when the caller did not supply one.
        if self.computed_confidence is None:
            self.computed_confidence = self.initial_confidence
@dataclass
class Proposition:
    """Represents one unit of knowledge (a proposition) extracted from text."""
    # Required fields (no defaults) must come first.
    text_span: str
    sentence_text: str
    epistemic_data: EpistemicData  # Era-compatible EpistemicData record

    # Defaulted fields follow.  NOTE: prop_id precedes the lemma fields, so
    # lemmas should be passed by keyword — a positional value in fourth place
    # becomes the prop_id.
    prop_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    subject_lemma: Optional[str] = None
    relation_lemma: Optional[str] = None
    value_lemma: Optional[str] = None
    is_negated: bool = False
    other_analysis: Dict[str, Any] = field(default_factory=dict)

    def __str__(self):
        # Compact one-line summary; could be extended for richer reporting.
        prefix = "[NEGATED] " if self.is_negated else ""
        ep = self.epistemic_data
        return (f"Prop({self.prop_id[:8]}): {prefix}"
                f"{self.subject_lemma} - {self.relation_lemma} - {self.value_lemma} "
                f"(Conf: {ep.computed_confidence:.2f}, Src: {ep.source_id})")
|
59 |
+
# --- Test Bloğu ---
|
60 |
+
if __name__ == "__main__":
|
61 |
+
print("Testing AEE Core Classes (Era Version)...")
|
62 |
+
ed1 = EpistemicData(source_id="src_test", initial_confidence=0.7)
|
63 |
+
ed1.plausibility_score = 0.9 # Test için manuel atama
|
64 |
+
ed1.validation_notes.append("Seems plausible based on initial check.")
|
65 |
+
print(f"Created EpistemicData (Era): {ed1}")
|
66 |
+
prop1 = Proposition(
|
67 |
+
text_span="Test span", sentence_text="Test sentence.", epistemic_data=ed1,
|
68 |
+
subject_lemma="test", relation_lemma="be", value_lemma="ok"
|
69 |
+
)
|
70 |
+
print(f"Created Proposition (Era): {prop1}")
|
71 |
+
print(f" Plausibility: {prop1.epistemic_data.plausibility_score}")
|
72 |
+
print(f" Validation Notes: {prop1.epistemic_data.validation_notes}")
|
73 |
+
print("\nCore classes (Era) seem functional.")
|
aee_era_main.py
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_era_main.py
|
2 |
+
# AEE Era Sürümü İşlem Hattını Çalıştıran Ana Script
|
3 |
+
# Era Extractor ve Linker entegre edildi. (Era Adım 2 Tamamlandı - Proje Kodu Bitti!)
|
4 |
+
|
5 |
+
import time
|
6 |
+
from typing import Dict, List, Optional, Any
|
7 |
+
|
8 |
+
# Era sürümü klasöründeki TÜM modülleri import et
|
9 |
+
try:
|
10 |
+
from aee_core_classes_era import Proposition, EpistemicData
|
11 |
+
from aee_extractor_era import process_with_spacy, extract_propositions_era, NLP_MODEL # Era Extractor
|
12 |
+
from aee_linker_era import find_and_link_evidence_era # Era Linker
|
13 |
+
from aee_updater_era import run_updates_era # Era Updater
|
14 |
+
from aee_explainer_era import generate_explanation_era # Era Explainer
|
15 |
+
from aee_bias_detector import run_bias_detection_v3 # v3 Bias Detector
|
16 |
+
from aee_validator import check_plausibility_v_era # Era Validator
|
17 |
+
from aee_utils import get_proposition_by_id # Utils
|
18 |
+
except ImportError as e:
|
19 |
+
print(f"Fatal Error: Could not import necessary modules. Check file paths and dependencies in AEE/Era folder.")
|
20 |
+
print(f"Import Error: {e}")
|
21 |
+
exit()
|
22 |
+
|
23 |
+
# --- Raporlama Fonksiyonu (Era) ---
|
24 |
+
def report_kb_era(kb: Dict[str, Proposition]):
|
25 |
+
# ... (Öncekiyle aynı - değişiklik yok) ...
|
26 |
+
print("\n" + "="*70); print(" AEE Era Version - Knowledge Base Report (Final Status)"); print("="*70)
|
27 |
+
if not kb: print("Knowledge Base is empty."); print("="*70); return
|
28 |
+
print(f"Total propositions in KB: {len(kb)}"); print("-"*70)
|
29 |
+
propositions_by_source: Dict[str, List[Proposition]] = {};
|
30 |
+
def sort_key(prop): conf = prop.epistemic_data.computed_confidence; return conf if conf is not None else -1.0
|
31 |
+
sorted_props = sorted(list(kb.values()), key=sort_key, reverse=True)
|
32 |
+
for prop in sorted_props: source = prop.epistemic_data.source_id; propositions_by_source.setdefault(source, []).append(prop)
|
33 |
+
for source_id in sorted(propositions_by_source.keys()):
|
34 |
+
props = propositions_by_source[source_id]; source_reliability = getattr(props[0].epistemic_data, 'reliability_score', None)
|
35 |
+
reliability_str = f"{source_reliability:.2f}" if source_reliability is not None else "N/A"
|
36 |
+
print(f"\n--- Source: {source_id} (Calculated Reliability: {reliability_str}) ---")
|
37 |
+
for prop in props:
|
38 |
+
neg_str = "[NEGATED] " if prop.is_negated else ""
|
39 |
+
supports_str = ', '.join([pid[:8] for pid in prop.epistemic_data.supports]) if prop.epistemic_data.supports else "None"
|
40 |
+
contradicts_str = ', '.join([pid[:8] for pid in prop.epistemic_data.contradicts]) if prop.epistemic_data.contradicts else "None"
|
41 |
+
bias_str = ', '.join(prop.epistemic_data.bias_flags) if prop.epistemic_data.bias_flags else "None"
|
42 |
+
plausibility_score = prop.epistemic_data.plausibility_score; plausibility_str = f"{plausibility_score:.2f}" if plausibility_score is not None else "N/A"
|
43 |
+
validation_notes_str = ', '.join(prop.epistemic_data.validation_notes) if prop.epistemic_data.validation_notes else "None"
|
44 |
+
conf_score = prop.epistemic_data.computed_confidence; conf_str = f"{conf_score:.3f}" if conf_score is not None else "N/A"
|
45 |
+
init_conf_score = prop.epistemic_data.initial_confidence; init_conf_str = f"{init_conf_score:.2f}" if init_conf_score is not None else "N/A"
|
46 |
+
print(f" Prop ID : {prop.prop_id}")
|
47 |
+
print(f" Struct: {neg_str}{prop.subject_lemma} - {prop.relation_lemma} - {prop.value_lemma}")
|
48 |
+
print(f" Conf. : {conf_str} (Initial: {init_conf_str})") # Initial conf'un değiştiğini göreceğiz
|
49 |
+
print(f" Links : Supports: [{supports_str}] | Contradicts: [{contradicts_str}]")
|
50 |
+
print(f" Biases: [{bias_str}]")
|
51 |
+
print(f" Plaus.: {plausibility_str} | Notes: [{validation_notes_str}]")
|
52 |
+
print("\n" + "="*70); print(" End of KB Report "); print("="*70)
|
53 |
+
|
54 |
+
|
55 |
+
# --- Ana İşlem Fonksiyonu (Era - Final) ---
|
56 |
+
def run_aee_era_pipeline(inputs: List[Dict[str, str]]) -> Dict[str, Proposition]:
|
57 |
+
"""
|
58 |
+
Verilen girdiler için AEE Era işlem hattını tam olarak çalıştırır
|
59 |
+
(Era Extract, Plausibility Check, Era Linker, Bias Detect, Era Update).
|
60 |
+
"""
|
61 |
+
if NLP_MODEL is None: print("FATAL ERROR: spaCy model not loaded."); return {}
|
62 |
+
|
63 |
+
print("\nStarting AEE Era Final Pipeline...")
|
64 |
+
knowledge_base: Dict[str, Proposition] = {}
|
65 |
+
start_time = time.time()
|
66 |
+
|
67 |
+
# 1. Adım: Extract (Era) & Validate Plausibility & Link (Era)
|
68 |
+
print("Phase 1: Extracting(Era), Validating Plausibility, and Linking(Era)...")
|
69 |
+
all_extracted_props_before_linking: List[Proposition] = []
|
70 |
+
for item in inputs:
|
71 |
+
source_id = item.get("source_id", f"unknown_source_{int(time.time())}"); text = item.get("text", "")
|
72 |
+
if not text: continue
|
73 |
+
doc = process_with_spacy(text)
|
74 |
+
if doc:
|
75 |
+
# ERA EXTRACTOR ÇAĞIRILIYOR
|
76 |
+
extracted_props = extract_propositions_era(doc, source_id)
|
77 |
+
for prop in extracted_props:
|
78 |
+
plausibility_score, validation_notes = check_plausibility_v_era(prop)
|
79 |
+
if hasattr(prop, 'epistemic_data') and prop.epistemic_data:
|
80 |
+
prop.epistemic_data.plausibility_score = plausibility_score
|
81 |
+
if validation_notes: prop.epistemic_data.validation_notes.extend(validation_notes)
|
82 |
+
all_extracted_props_before_linking.append(prop)
|
83 |
+
print(f" Phase 1a (Extraction(Era) & Validation) complete. Total extracted: {len(all_extracted_props_before_linking)}")
|
84 |
+
|
85 |
+
print(" Phase 1b (Linking(Era))...")
|
86 |
+
if find_and_link_evidence_era: # Era linker fonksiyonu
|
87 |
+
for new_prop in all_extracted_props_before_linking:
|
88 |
+
if new_prop.prop_id not in knowledge_base:
|
89 |
+
# ERA LINKER ÇAĞIRILIYOR
|
90 |
+
find_and_link_evidence_era(new_prop, knowledge_base)
|
91 |
+
knowledge_base[new_prop.prop_id] = new_prop
|
92 |
+
else: print("Skipping linking due to import error.")
|
93 |
+
print(f"Phase 1 (Extract(Era), Validate, Link(Era)) complete. KB size: {len(knowledge_base)}")
|
94 |
+
|
95 |
+
# 1.5 Adım: Bias Detection (v3)
|
96 |
+
print("\nPhase 1.5: Running Bias Detection Heuristics...")
|
97 |
+
if run_bias_detection_v3 and knowledge_base: run_bias_detection_v3(knowledge_base)
|
98 |
+
else: print("Skipping Bias Detection due to import error or empty KB.")
|
99 |
+
print("Phase 1.5 complete.")
|
100 |
+
|
101 |
+
# 2. Adım: Update (Era Mantığı ile)
|
102 |
+
print("\nPhase 2: Running Era Updates (Reliability, Cycle Detect, Plausibility-aware Confidence)...")
|
103 |
+
if run_updates_era: updated_knowledge_base = run_updates_era(knowledge_base) # ERA Updater
|
104 |
+
else: print("Skipping Updates due to import error."); updated_knowledge_base = knowledge_base
|
105 |
+
print("Phase 2 complete.")
|
106 |
+
|
107 |
+
end_time = time.time(); print(f"\nPipeline finished in {end_time - start_time:.2f} seconds.")
|
108 |
+
return updated_knowledge_base
|
109 |
+
|
# --- Main execution block ---
if __name__ == "__main__":
    # Sample inputs exercising every Era-version capability
    sample_inputs_era_final = [
        {
            "source_id": "fact_sheet_1", "source_type": "fact",
            "text": "Water is H2O. The sun is hot. Ice is cold."  # high plausibility, simple antonymy
        },
        {
            "source_id": "opinion_blog_A", "source_type": "blog",
            "text": "Maybe the new policy is good. It could improve things. Perhaps."  # low initial confidence (linguistic cues)
        },
        {
            "source_id": "opinion_blog_B", "source_type": "blog",
            "text": "The new policy is definitely bad! It will undoubtedly harm the economy. It is not good."  # high initial confidence (linguistic cues) + antonymy (good/bad)
        },
        {
            "source_id": "report_X", "source_type": "report",
            "text": "System Alpha is bigger than System Beta. System Beta is not small compared to Alpha."  # relational contradiction?
        },
        {
            "source_id": "another_report", "source_type": "report",
            "text": "System Alpha is large."  # synonym of 'bigger'? (exercises synonym support)
        },
        {   # cycle + low plausibility + source monoculture
            "source_id": "conspiracy_theory.blog", "source_type": "blog",
            "text": "The moon landing was faked because the photos look wrong. The photos look wrong because the shadows are incorrect. The shadows are incorrect because the landing was faked."
            # The extractor probably cannot extract these, but keep them for bias/cycle testing.
            # NOTE(review): the literal below is joined to the string above by
            # implicit string concatenation — it is part of the same "text"
            # value, not a separate dict entry.
            # Extra low-plausibility test:
            "Also, the moon is made of cheese."
        }
    ]

    # Run the pipeline
    final_kb_era = run_aee_era_pipeline(sample_inputs_era_final)

    # Print the overall KB report
    report_kb_era(final_kb_era)

    # Generate and print sample explanations
    print("\n" + "#"*70)
    print(" AEE Era Version - Generating Explanations")
    print("#"*70)
    if final_kb_era and generate_explanation_era:
        ids_to_explain = list(final_kb_era.keys())
        print(f"\nGenerating explanations for all {len(ids_to_explain)} propositions...\n")
        for prop_id in ids_to_explain:
            explanation = generate_explanation_era(prop_id, final_kb_era)  # Era explainer
            print(explanation)
            print("-" * 40)
    else: print("Knowledge Base is empty or Explainer not available.")
    print("\n" + "#"*70); print(" Explanation generation step complete."); print("#"*70)

    # PROJECT CODING COMPLETE message
    print("\n###########################################################################")
    print("# AEE ERA VERSION - ALL PLANNED CORE FEATURE CODING COMPLETE!")
    print("# All modules updated to Era versions where planned.")
    print("# Project includes: Extraction(Era.2a), Validation(Era.1), Linking(Era.2b),")
    print("# Bias Detection(v3), Updates(Era.1d), Explanation(Era.1e).")
    print("#")
    print("# FINAL STEP (FOR YOU): TESTING & EVALUATION!")
    print("# - Run this script: python aee_era_main.py")
    print("# - Examine the report and explanations thoroughly.")
    print("# - Check if initial confidence reflects modality.")
    print("# - Check if more links (support/contradiction) are found.")
    print("# - Check bias flags, plausibility, final confidence.")
    print("# - Try your own texts!")
    print("# - Provide your final feedback and evaluation.")
    print("###########################################################################")
aee_explainer_era.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_explainer_era.py
|
2 |
+
# AEE Era Sürümü: Önermenin epistemik durumu hakkında plausibility dahil açıklama üretir.
|
3 |
+
|
4 |
+
from typing import Dict, List, Optional, Any
|
5 |
+
|
6 |
+
# Era sürümündeki sınıfları ve utils'i import et
|
7 |
+
try:
|
8 |
+
from aee_core_classes_era import Proposition
|
9 |
+
from aee_utils import get_proposition_by_id, get_linked_propositions
|
10 |
+
except ImportError:
|
11 |
+
print("Error: Could not import dependencies from aee_core_classes_era.py or aee_utils.py.")
|
12 |
+
Proposition = None
|
13 |
+
|
# --- Explanation generation function (Era) ---

def generate_explanation_era(prop_id: str, kb: Dict[str, Proposition]) -> str:
    """
    Produce a human-readable explanation of the epistemic status of the
    proposition with the given id (including plausibility information).

    Returns an error string when the class import failed or the id is unknown.
    """
    if not Proposition: return "Error: Proposition class not available."

    prop = get_proposition_by_id(prop_id, kb)
    if not prop:
        return f"Error: Proposition with ID '{prop_id}' not found in the Knowledge Base."

    ep_data = prop.epistemic_data

    # Fetch linked propositions
    supporters = get_linked_propositions(prop_id, kb, link_type='supports')
    contradictors = get_linked_propositions(prop_id, kb, link_type='contradicts')

    # Build the text sections (Era version)
    explanation_lines = []
    explanation_lines.append(f"--- Epistemic Explanation (Era) for Proposition ID: {prop.prop_id[:8]} ---")
    explanation_lines.append(f"Statement : '{prop.text_span}'")
    explanation_lines.append(f"Extracted Struct : {'[NEGATED] ' if prop.is_negated else ''}"
                             f"{prop.subject_lemma} - {prop.relation_lemma} - {prop.value_lemma}")
    explanation_lines.append("-" * 20)
    explanation_lines.append(f"Source : {ep_data.source_id} (Type: {ep_data.source_type or 'N/A'})")
    explanation_lines.append(f"Timestamp : {ep_data.timestamp.strftime('%Y-%m-%d %H:%M:%S')}")
    # Bug fix: the conditional expression previously wrapped the *entire*
    # append argument, so a None score appended a bare "N/A" line with no
    # label. Format the value first, then always emit the labelled line.
    reliability_str = f"{ep_data.reliability_score:.2f}" if ep_data.reliability_score is not None else "N/A"
    explanation_lines.append(f"Source Reliability: {reliability_str}")
    # Same fix for the plausibility line.
    plausibility_str = f"{ep_data.plausibility_score:.2f}" if ep_data.plausibility_score is not None else "N/A"
    explanation_lines.append(f"Plausibility Score: {plausibility_str}")
    explanation_lines.append(f"Validation Notes : [{', '.join(ep_data.validation_notes) if ep_data.validation_notes else 'None'}]")
    explanation_lines.append(f"Confidence Score : {ep_data.computed_confidence:.3f} (Initial: {ep_data.initial_confidence:.2f})")
    explanation_lines.append("-" * 20)
    explanation_lines.append(f"Supporting Props ({len(supporters)}): "
                             f"[{', '.join([p.prop_id[:8] for p in supporters]) if supporters else 'None'}]")
    explanation_lines.append(f"Contradicting Props ({len(contradictors)}): "
                             f"[{', '.join([p.prop_id[:8] for p in contradictors]) if contradictors else 'None'}]")
    explanation_lines.append("-" * 20)
    explanation_lines.append(f"Potential Bias Flags: "
                             f"[{', '.join(ep_data.bias_flags) if ep_data.bias_flags else 'None'}]")
    explanation_lines.append("--- End of Explanation ---")

    return "\n".join(explanation_lines)
|
60 |
+
# --- Test Bloğu ---
|
61 |
+
if __name__ == "__main__":
|
62 |
+
print("Testing AEE Explainer Module (Era Version)...")
|
63 |
+
|
64 |
+
if Proposition:
|
65 |
+
from aee_core_classes_era import EpistemicData # Era class'ını import et
|
66 |
+
print("Creating a mock Knowledge Base for Era explainer testing...")
|
67 |
+
kb_test: Dict[str, Proposition] = {}
|
68 |
+
|
69 |
+
# Örnek önermeler (plausibility ve notlar dahil)
|
70 |
+
ed1 = EpistemicData(source_id="src1:news.com", initial_confidence=0.8, computed_confidence=0.85, reliability_score=0.7, source_type='news')
|
71 |
+
prop1 = Proposition("sky is blue", "sky is blue", ed1, "sky", "be", "blue", False)
|
72 |
+
prop1.epistemic_data.plausibility_score = 0.95 # Manuel test plausibility
|
73 |
+
|
74 |
+
ed2 = EpistemicData(source_id="src2:user_blog", initial_confidence=0.4, computed_confidence=0.2, reliability_score=0.3, source_type='blog')
|
75 |
+
prop2 = Proposition("sky is green", "sky is green", ed2, "sky", "be", "green", False)
|
76 |
+
prop2.epistemic_data.plausibility_score = 0.15 # Manuel test plausibility
|
77 |
+
prop2.epistemic_data.validation_notes.append("Low plausibility based on common knowledge")
|
78 |
+
prop2.epistemic_data.bias_flags.append("UNCOMMON_CLAIM")
|
79 |
+
|
80 |
+
# Manuel link ekleyelim (örnek amaçlı)
|
81 |
+
prop1.epistemic_data.contradicts.append(prop2.prop_id) # Gerçekte bunlar çelişmez ama test için ekleyelim
|
82 |
+
prop2.epistemic_data.contradicts.append(prop1.prop_id)
|
83 |
+
|
84 |
+
kb_test[prop1.prop_id] = prop1
|
85 |
+
kb_test[prop2.prop_id] = prop2
|
86 |
+
print(f"Mock KB created with {len(kb_test)} propositions.")
|
87 |
+
|
88 |
+
# Test 1: Açıklama üretme (prop1 için)
|
89 |
+
print("\n--- Generating Explanation for Prop 1 (Era) ---")
|
90 |
+
explanation1 = generate_explanation_era(prop1.prop_id, kb_test)
|
91 |
+
print(explanation1)
|
92 |
+
|
93 |
+
# Test 2: Açıklama üretme (prop2 için - plausibility düşük)
|
94 |
+
print("\n--- Generating Explanation for Prop 2 (Era - Low Plausibility) ---")
|
95 |
+
explanation2 = generate_explanation_era(prop2.prop_id, kb_test)
|
96 |
+
print(explanation2)
|
97 |
+
else:
|
98 |
+
print("Could not run tests because Proposition class import failed.")
|
99 |
+
|
100 |
+
print("\nExplainer module (Era) testing complete.")
|
aee_extractor_era.py
ADDED
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_extractor_era.py
|
2 |
+
# Metinleri işler ve AEE Projesi için temel önermeleri çıkarır.
|
3 |
+
# Era Sürümü Adım 2a: Başlangıç güven ataması dilbilimsel ipuçlarını dikkate alır.
|
4 |
+
|
5 |
+
import spacy
|
6 |
+
from spacy.tokens import Doc, Span, Token
|
7 |
+
from datetime import datetime
|
8 |
+
from typing import List, Optional, Tuple
|
9 |
+
|
10 |
+
# Era sürümündeki DOĞRU sınıfları import et
|
11 |
+
try:
|
12 |
+
from aee_core_classes_era import Proposition, EpistemicData
|
13 |
+
except ImportError:
|
14 |
+
print("Extractor Error: Could not import Proposition/EpistemicData class from aee_core_classes_era.py.")
|
15 |
+
Proposition = None; EpistemicData = None
|
16 |
+
|
# --- spaCy model loading ---
# Loaded once at import time; NLP_MODEL stays None when the model is missing,
# and process_with_spacy reports that at call time.
NLP_MODEL = None; MODEL_NAME = "en_core_web_sm"
try:
    NLP_MODEL = spacy.load(MODEL_NAME)
    # print(f"DEBUG Extractor: spaCy model '{MODEL_NAME}' loaded.")  # enable for debugging
except OSError:
    print(f"Extractor Error: spaCy English model '{MODEL_NAME}' not found. Please run: python -m spacy download {MODEL_NAME}")
# --- This function was missing and has been added: process_with_spacy ---
def process_with_spacy(text: str) -> Optional[Doc]:
    """Run the given text through spaCy and return the resulting Doc.

    Returns None when the model failed to load or processing raises.
    """
    if NLP_MODEL is None:
        print(f"Error: spaCy model not loaded. Cannot process text.")
        return None
    try:
        return NLP_MODEL(text)
    except Exception as exc:
        print(f"Error processing text with spaCy: {exc}")
        return None
# --- Helper functions ---
def get_token_lemma(token: Optional[Token]) -> Optional[str]:
    """Safely return the lowercase lemma of the given Token, or None."""
    if not token:
        return None
    return token.lemma_.lower()
+
|
46 |
+
def find_negation(token: Optional[Token], sentence: Span) -> bool:
    """
    Return True if a negation marker is attached to *token* or to its
    governing auxiliary.

    Checks, in one pass each:
      1. a direct ``neg`` dependent of the token — this also covers the
         copula "be" case, which the previous version re-checked in a
         separate, redundant loop over the same children;
      2. an ``advmod`` child with lemma "not" (e.g. "is simply not true");
      3. a ``neg`` dependent of the token's AUX head (e.g. "is not running").

    *sentence* is accepted for a possible future sentence-wide scan (see the
    commented-out sketch in the original) but is currently unused.
    """
    if not token:
        return False
    # Checks 1 and 2: a single walk over the token's own children.
    for child in token.children:
        if child.dep_ == "neg":
            return True
        if child.dep_ == "advmod" and child.lemma_ == "not":
            return True
    # Check 3: negation may hang off an auxiliary head instead of the token.
    if token.head != token and token.head.pos_ == "AUX":
        for child in token.head.children:
            if child.dep_ == "neg":
                return True
    return False
|
69 |
+
|
70 |
+
|
71 |
+
# --- Güven Hesaplama Yardımcıları (Era.2a) ---
|
72 |
+
def get_source_based_confidence(source_id: str) -> float:
    """
    Map a source identifier to a baseline confidence score.

    Matching is case-insensitive substring matching, checked from the
    least-trusted personal sources up to common knowledge; the first rule
    that matches wins. Unknown sources get a slightly optimistic 0.55.
    """
    sid = source_id.lower()
    if "user" in sid or "comment" in sid or "diary" in sid:
        return 0.45
    if "blog" in sid or "opinion" in sid or "forum" in sid:
        return 0.50
    if "news" in sid or any(domain in sid for domain in (".com", ".org", ".net")):
        return 0.65  # generic web / news outlets
    if "wiki" in sid:
        return 0.70
    if "report" in sid or "fact_sheet" in sid:
        return 0.75
    if "textbook" in sid or ".edu" in sid:
        return 0.80
    if "science" in sid or "research" in sid or "expert" in sid or "paper" in sid:
        return 0.85
    if "common_knowledge" in sid:
        return 0.90
    return 0.55  # unknown source: a bit above neutral
|
85 |
+
|
86 |
+
def calculate_linguistic_confidence_modifier(sent: Span) -> float:
    """
    Return a multiplier that adjusts a proposition's initial confidence
    based on hedging/certainty cues in the sentence.

    Returns 0.80 when any uncertainty marker is found (hedges win and are
    checked first, mutually exclusive with certainty as before), 1.15 when
    any certainty marker is found, else 1.0.

    Cleanup vs. previous version: the unused ``negation_markers`` set, the
    never-read ``has_negation_cue`` flag and the unused ``dep`` local are
    removed; the flag-and-break loops are collapsed into early returns.
    Behavior is unchanged.
    """
    uncertainty_markers = {"may", "might", "could", "perhaps", "possibly", "suggest", "appear", "seem", "likely", "probably", "believe", "think", "assume", "sometimes"}
    certainty_markers = {"will", "must", "definitely", "certainly", "undoubtedly", "always", "never", "prove", "confirm", "show", "demonstrate", "fact"}  # forms of "be" excluded

    # Uncertainty markers: AUX/VERB or ADV token whose lemma is in the set.
    for token in sent:
        if token.pos_ in ("AUX", "VERB", "ADV") and token.lemma_.lower() in uncertainty_markers:
            return 0.80  # hedged statement — dampen confidence

    # Only if no hedge was found, look for certainty markers (ADV or VERB).
    for token in sent:
        if token.pos_ in ("ADV", "VERB") and token.lemma_.lower() in certainty_markers:
            return 1.15  # emphatic statement — boost confidence slightly

    return 1.0  # neutral
|
128 |
+
|
129 |
+
# --- Clamp bounds for confidence scores (were missing; added) ---
# Every computed initial confidence is clamped into [MIN_CONFIDENCE, MAX_CONFIDENCE].
MIN_CONFIDENCE = 0.01
MAX_CONFIDENCE = 0.99
|
132 |
+
|
133 |
+
# --- Ana Önerme Çıkarım Fonksiyonu (Era) ---
|
134 |
+
def extract_propositions_era(doc: Doc, source_id: str) -> List[Proposition]:
    """
    Era version: extract (subject, relation, value) propositions from a
    parsed Doc, setting each proposition's initial confidence from both the
    source id and linguistic certainty/hedging cues in the sentence.

    Returns an empty list when the Doc is missing or the core classes
    failed to import.
    """
    propositions: List[Proposition] = []
    if not doc or not Proposition or not EpistemicData: return propositions

    for sent in doc.sents:
        # print(f"DEBUG EXTRACT: Processing sentence: '{sent.text}'")  # enable to trace sentences
        root: Token = sent.root
        subject: Optional[Token] = None; prop_object: Optional[Token] = None; attribute: Optional[Token] = None

        # Simple subject/object/attribute extraction.
        # Subject: the first nsubj/nsubjpass dependent of the root.
        for token in sent:
            if token.dep_ in ["nsubj", "nsubjpass"] and token.head == root:
                subject = token
                break
        if not subject:
            # print(" DEBUG EXTRACT: No subject found for root:", root.text)
            continue # skip sentences without a subject

        # Object or attribute directly attached to the root.
        for token in sent:
            if token.head == root:
                if token.dep_ in ["dobj", "pobj"]: # Direct or prepositional object
                    prop_object = token
                elif token.dep_ in ["attr", "acomp", "xcomp"]: # Attribute or complement
                    attribute = token
                # If both a dobj and an attr occur, the last one seen wins; good enough for now.

        # Determine the relation and value lemmas.
        relation_lemma = get_token_lemma(root)
        subject_lemma = get_token_lemma(subject)
        value_lemma = None
        value_token = attribute if attribute else prop_object # attribute takes priority

        if value_token:
            value_lemma = get_token_lemma(value_token)
            # Negation attached to the value itself (e.g. "not happy")?
            is_negated = find_negation(value_token, sent) # pass the sentence too
        else:
            # Without a value (e.g. "He runs.") this pattern yields no proposition.
            # print(f" DEBUG EXTRACT: No value found for S:{subject_lemma} R:{relation_lemma}")
            continue

        # Fall back to negation attached to the main verb.
        if not is_negated: # only if the value itself carried no negation
            is_negated = find_negation(root, sent)


        # Proceed only when the full triple was found.
        if subject_lemma and relation_lemma and value_lemma:
            # Confidence = source baseline scaled by linguistic cues, clamped.
            source_based_conf = get_source_based_confidence(source_id)
            linguistic_modifier = calculate_linguistic_confidence_modifier(sent)
            initial_confidence = max(MIN_CONFIDENCE, min(MAX_CONFIDENCE, source_based_conf * linguistic_modifier))

            # Guess the source type from the id (coarse substring match).
            source_type = None; sid_lower = source_id.lower()
            # ... (same type guessing as before) ...
            if "user" in sid_lower: source_type = "user"
            elif "news" in sid_lower: source_type = "news"
            # ... (others) ...
            elif "common" in sid_lower: source_type = "common"
            elif "textbook" in sid_lower: source_type = "textbook"


            # Build the epistemic record.
            ep_data = EpistemicData(
                source_id=source_id,
                initial_confidence=initial_confidence,
                source_type=source_type
            )

            # Build the new Proposition.
            new_prop = Proposition(
                text_span=sent.text, # whole sentence for now; can be narrowed later
                sentence_text=sent.text,
                epistemic_data=ep_data,
                subject_lemma=subject_lemma,
                relation_lemma=relation_lemma,
                value_lemma=value_lemma,
                is_negated=is_negated
            )
            # print(f" DEBUG EXTRACT: Extracted: {new_prop}")  # enable to see extractions
            propositions.append(new_prop)
        # else:
            # print(f" DEBUG EXTRACT: Incomplete structure S:{subject_lemma}, R:{relation_lemma}, V:{value_lemma}")


    return propositions
|
227 |
+
|
228 |
+
# --- Test block ---
# Manual smoke test: parses a few sentences covering negation, hedging and
# certainty, and prints what the extractor produces for each.
if __name__ == "__main__":
    print("\nTesting AEE Extractor Module (Era Version - Linguistic Confidence)...")

    if not NLP_MODEL:
        print("Cannot run tests because spaCy model is not loaded.")
    else:
        print("Creating test sentences...")
        test_sentences = [
            "The sky is blue.", # plain true statement
            "The sky is not blue.", # negation
            "The sky might be blue.", # uncertainty
            "The sky is definitely blue.", # certainty
            "System A is bigger than System B.", # relational
        ]

        for text in test_sentences:
            print(f"\nProcessing: '{text}'")
            doc = process_with_spacy(text)
            if doc:
                props = extract_propositions_era(doc, "test_source")
                for prop in props:
                    print(f" Extracted: {prop}")
                    print(f" Subject: {prop.subject_lemma}, Relation: {prop.relation_lemma}, Value: {prop.value_lemma}")
                    print(f" Negated: {prop.is_negated}, Confidence: {prop.epistemic_data.initial_confidence:.2f}")
            else:
                print(" Failed to process with spaCy.")
|
aee_linker_era.py
ADDED
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_linker_era.py
|
2 |
+
# AEE Era Sürümü: Önermeler arasındaki bağlantıları bulur.
|
3 |
+
# Genişletilmiş zıtlıklar, basit eşanlamlı/ilişki kontrolü içerir.
|
4 |
+
|
5 |
+
from typing import Dict, List, Optional, Set
|
6 |
+
import pprint
|
7 |
+
|
8 |
+
# Era sürümündeki sınıfları import et
|
9 |
+
try:
|
10 |
+
from aee_core_classes_era import Proposition
|
11 |
+
except ImportError:
|
12 |
+
print("Linker Error: Could not import Proposition class from aee_core_classes_era.py.")
|
13 |
+
Proposition = None
|
14 |
+
|
15 |
+
# --- Genişletilmiş Zıtlıklar ve Eşanlamlılar Sözlüğü (Era.2b) ---
|
16 |
+
|
17 |
+
# Extended antonym pairs used for contradiction detection.
# NOTE: the previous literal contained silently-shadowed duplicate keys
# ("liquid", "cool", "start") — Python keeps only the LAST value for a
# duplicated key, so the earlier entries never took effect. The shadowed
# entries are removed here; the resulting dict (keys, values and insertion
# order) is identical to what the old literal actually produced.
opposites = {
    "hot": "cold", "fast": "slow", "big": "small", "on": "off", "up": "down",
    "large": "small", "tall": "short", "good": "bad", "right": "wrong", "left": "right",
    "true": "false", "correct": "incorrect", "same": "different", "similar": "different",
    "liquid": "gas", "gas": "solid",  # three-state antonyms need better modelling
    "open": "closed", "light": "dark", "heavy": "light", "happy": "sad", "rich": "poor",
    "increase": "decrease", "expand": "contract", "allow": "forbid", "permit": "forbid",
    "warm": "cold", "cool": "hot",  # 'cool' is an intermediate value
    "wet": "dry", "full": "empty", "present": "absent", "alive": "dead",
    "win": "lose", "pass": "fail", "accept": "reject", "remember": "forget",
    "love": "hate", "friend": "enemy", "begin": "end", "start": "end",
    "always": "never", "often": "rarely", "sometimes": "never",  # frequency
    "safe": "dangerous", "possible": "impossible", "legal": "illegal",
    "essential": "inessential", "beneficial": "harmful", "great": "terrible"  # more abstract
}
# Make the lookup symmetric. setdefault keeps the FIRST mapping on clashes,
# e.g. "right" stays paired with "wrong" (from right/wrong), not with "left".
bidirectional_opposites = {}
for k, v in opposites.items():
    bidirectional_opposites.setdefault(k, v)  # keep the first mapping
    bidirectional_opposites.setdefault(v, k)  # add the reverse (no overwrite)
|
38 |
+
|
39 |
+
# Minimal synonym table (used by the support check).
synonyms = {
    "big": "large", "fast": "quick", "rapid": "fast", "begin": "start",
    "finish": "end", "permit": "allow", "great": "good",  # very small list
    "essential": "important", "beneficial": "helpful", "harmful": "dangerous"
}
# Symmetric lookup: start from a copy and add each reverse mapping without
# overwriting entries that already exist.
bidirectional_synonyms = dict(synonyms)
for word, syn in synonyms.items():
    bidirectional_synonyms.setdefault(syn, word)
|
50 |
+
|
51 |
+
# Relational contradiction rules (simple).
# Keyed by (relation, value); each maps to the (relation, value) pairs it
# contradicts. NOTE(review): not consulted anywhere visible in this module —
# the relational check in find_and_link_evidence_era currently uses
# bidirectional_opposites instead (see its TODO); confirm before removing.
relational_contradictions = {
    ("be", "bigger"): [("be", "smaller"), ("be", "equal")], # X > Y contradicts X < Y or X = Y
    ("be", "smaller"): [("be", "bigger"), ("be", "equal")],
    ("be", "equal"): [("be", "bigger"), ("be", "smaller"), ("be", "different")],
    # ... more rules can be added ...
}
|
59 |
+
|
60 |
+
|
61 |
+
# --- Yardımcı Fonksiyon ---
|
62 |
+
def print_prop_debug_info(p: Proposition, prefix=""):
    """Return a one-line debug summary of *p* (the string "None" if p is falsy)."""
    if not p:
        return "None"
    pieces = [
        f"{prefix}ID:{p.prop_id[:8]}, ",
        f"S:'{p.subject_lemma}', R:'{p.relation_lemma}', V:'{p.value_lemma}', ",
        f"Neg:{p.is_negated}",
    ]
    return "".join(pieces)
|
67 |
+
|
68 |
+
# --- Bağlantı Bulma Fonksiyonu (Era) ---
|
69 |
+
def find_and_link_evidence_era(new_prop: Proposition, kb: Dict[str, Proposition]):
    """
    Era version: compare *new_prop* against every proposition in *kb* and
    record support/contradiction edges on BOTH sides (mutates the
    propositions' epistemic_data in place; returns nothing).

    Detected relations:
      - direct contradiction: same triple, flipped negation;
      - opposing concept: antonym values (bidirectional_opposites), same negation;
      - support: same or synonymous value (bidirectional_synonyms), same negation;
      - opposing relation: same subject and value but antonym relations.
    """
    if not kb or not Proposition: return
    new_subj=new_prop.subject_lemma; new_rel=new_prop.relation_lemma; new_val=new_prop.value_lemma; new_neg=new_prop.is_negated; new_id=new_prop.prop_id
    if not all([new_subj, new_rel, new_val]): return

    for old_prop_id, old_prop in kb.items():
        if new_id == old_prop_id: continue
        old_subj=old_prop.subject_lemma; old_rel=old_prop.relation_lemma; old_val=old_prop.value_lemma; old_neg=old_prop.is_negated

        # --- Match checks (at most one of the two flags gets set per pair) ---
        is_contradiction = False
        is_support = False

        # 1. Same subject and relation: compare value / negation.
        if new_subj is not None and new_subj == old_subj and new_rel is not None and new_rel == old_rel:
            nv = new_val.strip() if isinstance(new_val, str) else new_val
            ov = old_val.strip() if isinstance(old_val, str) else old_val

            # 1a. Direct contradiction: identical value, opposite negation.
            if nv == ov and new_neg != old_neg: is_contradiction = True; print(f"[Linker Found]: Direct Contradiction ({new_id[:4]} vs {old_prop_id[:4]})")
            # 1b. Opposing concept: antonym values with the same negation.
            elif (bidirectional_opposites.get(nv) == ov or bidirectional_opposites.get(ov) == nv) and new_neg == old_neg: is_contradiction = True; print(f"[Linker Found]: Opposing Concept ('{nv}' vs '{ov}') ({new_id[:4]} vs {old_prop_id[:4]})")
            # 1c. Support: same or synonymous value with the same negation.
            elif new_neg == old_neg and (nv == ov or bidirectional_synonyms.get(nv) == ov or bidirectional_synonyms.get(ov) == nv): is_support = True; print(f"[Linker Found]: Support (Same/Synonym Value) ({new_id[:4]} vs {old_prop_id[:4]})")

        # 2. Relational contradiction: same subject and value, different relation
        #    (e.g. X > Y vs X < Y, where Y is the value). Only reached when
        #    case 1 did not apply (relations differ).
        elif new_subj is not None and new_subj == old_subj and new_val is not None and new_val == old_val and new_neg == old_neg:
            rel_key_new = (new_rel, new_val) # (relation, value) — currently unused, kept for the TODO below
            rel_key_old = (old_rel, old_val) # (relation, value) — values are equal here

            # TODO: generalize (e.g. via relational_contradictions); for now a
            # simple antonym check on the relations themselves,
            # e.g. relations such as 'bigger' vs 'smaller'.
            if bidirectional_opposites.get(new_rel) == old_rel or bidirectional_opposites.get(old_rel) == new_rel:
                is_contradiction = True; print(f"[Linker Found]: Opposing Relation ('{new_rel}' vs '{old_rel}') for same Subj/Val ({new_id[:4]} vs {old_prop_id[:4]})")


        # 3. TODO: richer relations (entailment etc.) could be added here.


        # --- Record the links (symmetric, duplicate-safe appends) ---
        if is_contradiction:
            if old_prop_id not in new_prop.epistemic_data.contradicts: new_prop.epistemic_data.contradicts.append(old_prop_id)
            if new_id not in old_prop.epistemic_data.contradicts: old_prop.epistemic_data.contradicts.append(new_id)
        elif is_support:
            if old_prop_id not in new_prop.epistemic_data.supports: new_prop.epistemic_data.supports.append(old_prop_id)
            if new_id not in old_prop.epistemic_data.supports: old_prop.epistemic_data.supports.append(new_id)
|
119 |
+
|
120 |
+
|
121 |
+
# --- Test block ---
# Manual smoke test: builds mock propositions covering each link type,
# runs the linker incrementally, then dumps the resulting link graph.
if __name__ == "__main__":
    print("Testing AEE Linker Module (Era Version - Enhanced Relations)...")
    if Proposition:
        from aee_core_classes_era import EpistemicData
        kb_test: Dict[str, Proposition] = {}
        print("\nCreating Mock Propositions...")
        # Examples
        ed1=EpistemicData(source_id="src1"); p1 = Proposition("A > B", "A is bigger than B", ed1, "a", "be", "bigger", False); kb_test[p1.prop_id]=p1
        ed2=EpistemicData(source_id="src2"); p2 = Proposition("A < B", "A is smaller than B", ed2, "a", "be", "smaller", False) # should contradict p1 (relation)
        ed3=EpistemicData(source_id="src3"); p3 = Proposition("C is fast", "C runs fast", ed3, "c", "run", "fast", False)
        ed4=EpistemicData(source_id="src4"); p4 = Proposition("C is quick", "C is quick", ed4, "c", "be", "quick", False) # synonym value of p3 but different relation - WON'T be found
        ed5=EpistemicData(source_id="src5"); p5 = Proposition("D is large", "D is large", ed5, "d", "be", "large", False)
        ed6=EpistemicData(source_id="src6"); p6 = Proposition("D is big", "D is big", ed6, "d", "be", "big", False) # synonym value of p5 (support)
        ed7=EpistemicData(source_id="src7"); p7 = Proposition("E is hot", "E is hot", ed7, "e", "be", "hot", False)
        ed8=EpistemicData(source_id="src8"); p8 = Proposition("E is not cold", "E is not cold", ed8, "e", "be", "cold", True) # should contradict p7 (antonym + negation)

        # Run the linker
        print("\nRunning Linker...")
        props_to_link = [p2, p3, p4, p5, p6, p7, p8]
        for prop in props_to_link:
            find_and_link_evidence_era(prop, kb_test) # call the Era linker first
            kb_test[prop.prop_id] = prop # then add to the KB

        # Print the results
        print("\n--- Final KB State (Enhanced Linker Test) ---")
        for prop_id, prop_obj in kb_test.items():
            supports_str = ', '.join([pid[:4] for pid in prop_obj.epistemic_data.supports]) if prop_obj.epistemic_data.supports else "None"
            contradicts_str = ', '.join([pid[:4] for pid in prop_obj.epistemic_data.contradicts]) if prop_obj.epistemic_data.contradicts else "None"
            print(f"ID: {prop_id[:8]} ({prop_obj.subject_lemma} {prop_obj.relation_lemma} {prop_obj.value_lemma}) | Supports: [{supports_str}] | Contradicts: [{contradicts_str}]")

    else: print("Could not run tests due to import error.")
    print("\nEnhanced Linker module testing complete.")
|
aee_updater.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_updater.py
|
2 |
+
# AEE v2.0: Kaynak güvenilirliğini ve önerme güven skorlarını dinamik olarak günceller.
|
3 |
+
# v2.0.1: Kaynak güvenilirliği hesaplama mantığı düzeltildi/basitleştirildi.
|
4 |
+
|
5 |
+
import math
|
6 |
+
from typing import Dict, List, Optional
|
7 |
+
from collections import defaultdict
|
8 |
+
|
9 |
+
# v2.0 klasöründeki sınıfları import et
|
10 |
+
from aee_core_classes import Proposition
|
11 |
+
|
12 |
+
# --- Helper constants and parameters ---
DEFAULT_SOURCE_RELIABILITY = 0.6 # raised slightly from the old default
RELIABLE_SOURCE_SCORE = 0.75 # score for a source with no contradictions
UNRELIABLE_SOURCE_SCORE = 0.35 # score for a source involved in contradictions
MIN_CONFIDENCE = 0.01
MAX_CONFIDENCE = 0.99
# MIN/MAX_RELIABILITY are no longer needed: reliability is assigned directly

# Weights for the confidence-update formula (unchanged)
SUPPORT_WEIGHT = 0.10
CONTRADICTION_WEIGHT = 0.35
RELIABILITY_DAMPENING_FACTOR = 0.5
|
24 |
+
|
25 |
+
# --- Kaynak Güvenilirliği Hesaplama (Düzeltilmiş/Basitleştirilmiş Mantık) ---
|
26 |
+
def calculate_source_reliability_v2(source_id: str, kb: Dict[str, Proposition]) -> float:
    """
    Score a source's reliability by whether ANY of its propositions in the
    KB participates in a contradiction (simple binary heuristic).

    Returns DEFAULT_SOURCE_RELIABILITY for a source with no propositions,
    UNRELIABLE_SOURCE_SCORE when at least one of its propositions has a
    contradiction link, and RELIABLE_SOURCE_SCORE otherwise.
    """
    own_props = [p for p in kb.values() if p.epistemic_data.source_id == source_id]
    if not own_props:
        return DEFAULT_SOURCE_RELIABILITY

    if any(p.epistemic_data.contradicts for p in own_props):
        # print(f"DEBUG Reliability: Source {source_id} flagged as UNRELIABLE.")
        return UNRELIABLE_SOURCE_SCORE
    # print(f"DEBUG Reliability: Source {source_id} flagged as RELIABLE.")
    return RELIABLE_SOURCE_SCORE
|
48 |
+
|
49 |
+
# --- Önerme Güven Skoru Güncelleme (Aynı kaldı) ---
|
50 |
+
def update_proposition_confidence_v2(prop: Proposition, kb: Dict[str, Proposition], source_reliability_scores: Dict[str, float]):
    """
    Update a proposition's ``computed_confidence`` from its initial
    confidence, its source's reliability, and the confidence of its
    supporting/contradicting evidence.

    Note: mutates *prop* in place (writes ep_data.computed_confidence).
    Uses the CURRENT computed_confidence of linked propositions, so the
    overall result is order-dependent across a batch run.
    """
    ep_data = prop.epistemic_data
    initial_conf = ep_data.initial_confidence
    # Use the precomputed reliability for this source, else the default.
    source_reliability = source_reliability_scores.get(ep_data.source_id, DEFAULT_SOURCE_RELIABILITY)

    # Blend reliability into the starting confidence via the dampening factor.
    adjusted_initial_conf = initial_conf * (1 - RELIABILITY_DAMPENING_FACTOR) + \
                            (initial_conf * source_reliability) * RELIABILITY_DAMPENING_FACTOR

    current_confidence = adjusted_initial_conf

    # Add the effect of supporting evidence (diminishing as confidence -> 1).
    total_support_effect = 0.0
    if ep_data.supports:
        support_count = 0 # Debug
        for supporter_id in ep_data.supports:
            supporter_prop = kb.get(supporter_id)
            if supporter_prop:
                support_count += 1
                support_gain = SUPPORT_WEIGHT * supporter_prop.epistemic_data.computed_confidence * (1 - current_confidence)
                total_support_effect += support_gain
        # print(f"DEBUG Confidence {prop.prop_id[:4]}: Support effect calculated from {support_count} supporters.")
    current_confidence += total_support_effect

    # Subtract the effect of contradicting evidence (proportional to current confidence).
    total_contradiction_effect = 0.0
    if ep_data.contradicts:
        contradiction_count = 0 # Debug
        for contradictor_id in ep_data.contradicts:
            contradictor_prop = kb.get(contradictor_id)
            if contradictor_prop:
                contradiction_count += 1
                contradiction_loss = CONTRADICTION_WEIGHT * contradictor_prop.epistemic_data.computed_confidence * current_confidence
                total_contradiction_effect += contradiction_loss
        # print(f"DEBUG Confidence {prop.prop_id[:4]}: Contradiction effect calculated from {contradiction_count} contradictors.")
    current_confidence -= total_contradiction_effect

    # Clamp into the allowed range.
    ep_data.computed_confidence = max(MIN_CONFIDENCE, min(MAX_CONFIDENCE, current_confidence))

    # print(f"DEBUG Confidence {prop.prop_id[:4]}: Final computed confidence: {ep_data.computed_confidence:.3f}")
|
97 |
+
|
98 |
+
|
99 |
+
# --- Toplu Güncelleme Fonksiyonu (Aynı kaldı) ---
|
100 |
+
def run_updates_v2(kb: Dict[str, Proposition]) -> Dict[str, Proposition]:
    """
    Compute every source's reliability over the whole KB, stamp it onto the
    corresponding propositions, then update every proposition's confidence.

    Mutates the propositions in *kb* in place and returns the same dict.

    Fix vs. previous version: reliability was written to propositions inside
    a per-source loop over the whole KB (O(sources * props)); it is now a
    single pass with identical results.
    """
    print("\nRunning v2 Updates (Reliability & Confidence)...")
    if not kb:
        print("Knowledge Base is empty. No updates to run.")
        return kb

    # Step 1: reliability per distinct source.
    print(" Calculating source reliabilities...")
    source_reliability_scores: Dict[str, float] = {
        source_id: calculate_source_reliability_v2(source_id, kb)
        for source_id in {p.epistemic_data.source_id for p in kb.values()}
    }
    # Stamp each proposition with its source's score in one pass.
    for prop in kb.values():
        prop.epistemic_data.reliability_score = source_reliability_scores[prop.epistemic_data.source_id]

    # Step 2: update every proposition's confidence.
    print(" Updating proposition confidences...")
    for prop in list(kb.values()):
        update_proposition_confidence_v2(prop, kb, source_reliability_scores)

    print("Updates complete.")
    return kb
|
130 |
+
|
131 |
+
# --- Test block (unchanged) ---
# Manual smoke test: builds a small KB with reliable/unreliable sources,
# simulates the linker's edges by hand, runs the updater and prints scores.
if __name__ == "__main__":
    # ... (previous test-block code; unchanged) ...
    print("\nTesting AEE Updater Module (v2.0.1 - with updated reliability)...")
    from aee_core_classes import EpistemicData # needed for the test

    # Build a fake KB for the test
    kb_test: Dict[str, Proposition] = {}

    # Reliable source 1
    src_reliable = "reliable_source.com"
    ed_r1 = EpistemicData(source_id=src_reliable, initial_confidence=0.9)
    p_r1 = Proposition("sky is blue", "sky is blue", ed_r1, "sky", "be", "blue")
    kb_test[p_r1.prop_id] = p_r1

    ed_r2 = EpistemicData(source_id=src_reliable, initial_confidence=0.85)
    p_r2 = Proposition("grass is green", "grass is green", ed_r2, "grass", "be", "green")
    kb_test[p_r2.prop_id] = p_r2

    # Unreliable source 1
    src_unreliable = "unreliable_source.net"
    ed_u1 = EpistemicData(source_id=src_unreliable, initial_confidence=0.4)
    p_u1 = Proposition("sky is green", "sky is green", ed_u1, "sky", "be", "green")
    kb_test[p_u1.prop_id] = p_u1

    ed_u2 = EpistemicData(source_id=src_unreliable, initial_confidence=0.3)
    p_u2 = Proposition("sky is not blue", "sky is not blue", ed_u2, "sky", "be", "blue", is_negated=True) # contradicts p_r1
    kb_test[p_u2.prop_id] = p_u2

    # Reliable source 2
    src_reliable2 = "another_reliable.org"
    ed_r3 = EpistemicData(source_id=src_reliable2, initial_confidence=0.8)
    p_r3 = Proposition("sky is blue", "reports say sky is blue", ed_r3, "sky", "be", "blue") # supports p_r1
    kb_test[p_r3.prop_id] = p_r3

    print(f"\nInitial KB state (Before Linking & Updates): {len(kb_test)} propositions")

    # Simulate the links manually
    p_r1.epistemic_data.contradicts.append(p_u2.prop_id)
    p_u2.epistemic_data.contradicts.append(p_r1.prop_id)
    p_r1.epistemic_data.supports.append(p_r3.prop_id)
    p_r3.epistemic_data.supports.append(p_r1.prop_id)

    print("\nSimulated Linking complete.")

    # Run the updates
    updated_kb_test = run_updates_v2(kb_test)

    print("\n--- Final KB State (After Updates) ---")
    for prop_id, prop_obj in updated_kb_test.items():
        print(f"ID: {prop_id[:8]} | Src: {prop_obj.epistemic_data.source_id} "
              f"| InitConf: {prop_obj.epistemic_data.initial_confidence:.2f} "
              f"| SrcRel: {prop_obj.epistemic_data.reliability_score:.2f} "
              f"| FinalConf: {prop_obj.epistemic_data.computed_confidence:.3f} "
              f"| Supports: {len(prop_obj.epistemic_data.supports)} "
              f"| Contradicts: {len(prop_obj.epistemic_data.contradicts)}")

    print("\nUpdater module testing complete.")
|
aee_updater_era.py
ADDED
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_updater_era.py
|
2 |
+
# AEE Era Sürümü: Güven/Güvenilirlik güncellemeleri + Plausibility + Bias/Cycle Ceza
|
3 |
+
# v1.0.1 (Era): Bias flag kontrolü için debug print eklendi.
|
4 |
+
|
5 |
+
import math
|
6 |
+
from typing import Dict, List, Optional, Set
|
7 |
+
from collections import defaultdict
|
8 |
+
|
9 |
+
try:
|
10 |
+
from aee_core_classes_era import Proposition
|
11 |
+
except ImportError:
|
12 |
+
print("Updater Error: Could not import Proposition class from aee_core_classes_era.py.")
|
13 |
+
Proposition = None
|
14 |
+
|
15 |
+
# --- Constants ---
# Source-reliability defaults and clamp ranges for confidence/reliability.
DEFAULT_SOURCE_RELIABILITY = 0.6; RELIABLE_SOURCE_SCORE = 0.75; UNRELIABLE_SOURCE_SCORE = 0.35
MIN_CONFIDENCE = 0.01; MAX_CONFIDENCE = 0.99; MIN_RELIABILITY = 0.1; MAX_RELIABILITY = 1.0
# Evidence weights and the damping applied when mixing in source reliability.
SUPPORT_WEIGHT = 0.10; CONTRADICTION_WEIGHT = 0.35; RELIABILITY_DAMPENING_FACTOR = 0.5
# Multiplicative penalties for bias-flagged / circular-support propositions.
BIAS_PENALTY_MULTIPLIER = 0.85; CIRCULAR_SUPPORT_PENALTY_MULTIPLIER = 0.75
PLAUSIBILITY_WEIGHT_FACTOR = 1.0
|
21 |
+
|
22 |
+
# --- Güvenilirlik Hesaplama ---
|
23 |
+
def calculate_source_reliability_era(source_id: str, kb: Dict[str, Proposition]) -> float:
    """Heuristic reliability score for a source.

    A source is deemed unreliable as soon as any proposition it produced
    has at least one contradiction link; otherwise it gets the reliable
    score. Sources with no propositions in the KB (or when the
    Proposition class failed to import) receive the default score.
    """
    if not Proposition:
        return DEFAULT_SOURCE_RELIABILITY
    own_props = [candidate for candidate in kb.values()
                 if candidate.epistemic_data.source_id == source_id]
    if not own_props:
        return DEFAULT_SOURCE_RELIABILITY
    # A single contradicted proposition is enough to downgrade the source.
    contradicted = any(
        hasattr(candidate.epistemic_data, 'contradicts') and candidate.epistemic_data.contradicts
        for candidate in own_props
    )
    if contradicted:
        return UNRELIABLE_SOURCE_SCORE
    return RELIABLE_SOURCE_SCORE
|
30 |
+
|
31 |
+
# --- Döngü Tespiti ---
|
32 |
+
def detect_circular_support_era(kb: Dict[str, Proposition]):
    """Flag propositions that participate in a cycle of 'supports' links.

    Runs a depth-first search from every not-yet-visited node. When a back
    edge into the current recursion stack is found, every node on the
    detected cycle gets "CIRCULAR_SUPPORT" appended to its bias_flags
    (at most once per node). Mutates kb in place; returns None.
    """
    # ... (Same as before) ...
    if not Proposition: return
    # print(" Running Circular Support Detection...")
    prop_ids = list(kb.keys()); visited_globally = set(); flagged_props_count = 0; circular_support_flag = "CIRCULAR_SUPPORT"
    for start_node_id in prop_ids:
        if start_node_id not in visited_globally:
            # Fresh recursion stack and path for each DFS root.
            recursion_stack = set(); path = []
            def dfs_visit(current_node_id):
                # Returns True as soon as one cycle has been flagged on this path.
                nonlocal flagged_props_count; visited_globally.add(current_node_id); recursion_stack.add(current_node_id); path.append(current_node_id)
                current_prop = kb.get(current_node_id)
                # No outgoing 'supports' edges: backtrack cleanly.
                if not current_prop or not getattr(current_prop.epistemic_data, 'supports', None): recursion_stack.remove(current_node_id); path.pop(); return False
                for neighbour_id in current_prop.epistemic_data.supports:
                    if neighbour_id not in visited_globally:
                        if dfs_visit(neighbour_id): return True
                    elif neighbour_id in recursion_stack:
                        # Back edge: neighbour is on the current path, so
                        # path[index(neighbour):] spells out the cycle.
                        # print(f" -> Circular Support Detected involving node {neighbour_id[:8]}!")
                        try:
                            cycle_start_index = path.index(neighbour_id); cycle_nodes = path[cycle_start_index:]
                            # print(f" Cycle: {' -> '.join([p[:8] for p in cycle_nodes])}")
                            for node_id_in_cycle in cycle_nodes:
                                node_prop = kb.get(node_id_in_cycle)
                                # Flag each cycle member once; count only new flags.
                                if node_prop and circular_support_flag not in node_prop.epistemic_data.bias_flags: node_prop.epistemic_data.bias_flags.append(circular_support_flag); flagged_props_count += 1
                        except ValueError: pass
                        # NOTE(review): returning True here aborts the DFS for this
                        # root, so further cycles reachable from the same root may
                        # go unflagged in this pass — confirm this is intended.
                        return True
                recursion_stack.remove(current_node_id); path.pop(); return False
            dfs_visit(start_node_id)
    print(f" Circular Support Detection complete. Flagged {flagged_props_count} propositions.")
|
60 |
+
|
61 |
+
|
62 |
+
# --- Güven Güncelleme (Era - Debug Eklendi) ---
|
63 |
+
def update_proposition_confidence_era(prop: Proposition, kb: Dict[str, Proposition], source_reliability_scores: Dict[str, float]):
    """Recompute `computed_confidence` for one proposition (Era logic).

    Order of operations matters — each step feeds the next:
      1. Dampen the initial confidence by the source's reliability score.
      2. Boost for every supporting proposition (bounded by 1 - current).
      3. Penalize for every contradicting proposition.
      4. Apply multiplicative penalties for any bias flags.
      5. Scale by the plausibility score, when one was computed.
      6. Clamp to [MIN_CONFIDENCE, MAX_CONFIDENCE] and store the result.

    Mutates prop.epistemic_data in place; returns None.
    """
    if not Proposition: return
    ep_data = prop.epistemic_data; initial_conf = ep_data.initial_confidence
    # Unknown sources fall back to the default reliability.
    source_reliability = source_reliability_scores.get(ep_data.source_id, DEFAULT_SOURCE_RELIABILITY)
    # Linear blend: part raw confidence, part reliability-weighted confidence.
    adjusted_initial_conf = initial_conf * (1 - RELIABILITY_DAMPENING_FACTOR) + (initial_conf * source_reliability) * RELIABILITY_DAMPENING_FACTOR
    current_confidence = adjusted_initial_conf
    total_support_effect = 0.0
    if ep_data.supports:
        for supporter_id in ep_data.supports:
            supporter_prop = kb.get(supporter_id)
            # The (1 - current_confidence) factor keeps the boosted value below 1.0.
            if supporter_prop: total_support_effect += SUPPORT_WEIGHT * supporter_prop.epistemic_data.computed_confidence * (1 - current_confidence)
    current_confidence += total_support_effect
    total_contradiction_effect = 0.0
    if ep_data.contradicts:
        for contradictor_id in ep_data.contradicts:
            contradictor_prop = kb.get(contradictor_id)
            # Scaling by current_confidence makes strongly-held claims lose more.
            if contradictor_prop: total_contradiction_effect += CONTRADICTION_WEIGHT * contradictor_prop.epistemic_data.computed_confidence * current_confidence
    current_confidence -= total_contradiction_effect

    # !!! NEW DEBUG PRINT !!! (added in v1.0.1 to trace why bias flags appear empty here)
    print(f" !!! Checking Bias Flags for Prop {prop.prop_id[:8]} !!! Current Flags: {ep_data.bias_flags}")
    # Bias and inference penalties
    if ep_data.bias_flags: # Original author's debug note: why does this check return False?
        print(f" -> Applying penalty for Bias Flags: {ep_data.bias_flags} to Prop {prop.prop_id[:8]}")
        if "CIRCULAR_SUPPORT" in ep_data.bias_flags:
            current_confidence *= CIRCULAR_SUPPORT_PENALTY_MULTIPLIER
            print(f" -> Applied CIRCULAR_SUPPORT penalty. Confidence now: {current_confidence:.3f}")
        # Any non-circular flag triggers the general bias penalty (applied once).
        if any(flag != "CIRCULAR_SUPPORT" for flag in ep_data.bias_flags):
            current_confidence *= BIAS_PENALTY_MULTIPLIER
            print(f" -> Applied general BIAS penalty. Confidence now: {current_confidence:.3f}")
    # else: # Debug: see why this branch was not entered
        # print(f" -> No Bias Flags found for Prop {prop.prop_id[:8]} at update time.")


    # Effect of the plausibility score
    plausibility = ep_data.plausibility_score
    if plausibility is not None:
        # print(f" Applying Plausibility ({plausibility:.2f}) to Prop {prop.prop_id[:8]}. Confidence before: {current_confidence:.3f}")
        # With PLAUSIBILITY_WEIGHT_FACTOR == 1.0 this reduces to multiplying by the score.
        current_confidence *= (plausibility * PLAUSIBILITY_WEIGHT_FACTOR + (1-PLAUSIBILITY_WEIGHT_FACTOR))
        # print(f" -> Confidence after plausibility: {current_confidence:.3f}")

    ep_data.computed_confidence = max(MIN_CONFIDENCE, min(MAX_CONFIDENCE, current_confidence))
|
105 |
+
|
106 |
+
# --- Toplu Güncelleme Fonksiyonu (Era) ---
|
107 |
+
def run_updates_era(kb: Dict[str, Proposition]) -> Dict[str, Proposition]:
    """Run the full Era update pass over the knowledge base.

    Steps: score every source's reliability (and stamp it onto its
    propositions), detect circular support chains, then recompute each
    proposition's confidence. Mutates kb in place and returns it.
    """
    if not Proposition or not kb:
        print("Knowledge Base is empty or Proposition class not available.")
        return kb
    print("\nRunning Era Updates (Reliability, Cycle Detection & Confidence Refinement)...")
    print(" Calculating source reliabilities...")
    reliability_by_source: Dict[str, float] = {}
    for sid in set(entry.epistemic_data.source_id for entry in kb.values()):
        score = calculate_source_reliability_era(sid, kb)
        reliability_by_source[sid] = score
        # Stamp the score onto every proposition coming from this source.
        for entry in kb.values():
            if entry.epistemic_data.source_id == sid:
                entry.epistemic_data.reliability_score = score
    detect_circular_support_era(kb)
    print(" Updating proposition confidences (Era logic)...")
    # Snapshot the values first so updates cannot disturb the iteration.
    for proposition in list(kb.values()):
        update_proposition_confidence_era(proposition, kb, reliability_by_source)
    print("Updates complete.")
    return kb
|
124 |
+
|
125 |
+
# --- Test Bloğu ---
|
126 |
+
if __name__ == "__main__":
    # ... (Test block identical to the previous version - no changes) ...
    print("\nTesting AEE Updater Module (Era Version - Bias Flag Check)...")
    # ... (Rest of the test code is the same) ...
|
aee_utils.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_utils.py
|
2 |
+
# AEE Projesi için genel yardımcı fonksiyonlar.
|
3 |
+
# v1.0.2 (Era): Import aee_core_classes_era olarak düzeltildi.
|
4 |
+
|
5 |
+
from typing import Dict, List, Optional, Any
|
6 |
+
|
7 |
+
# Era sürümündeki DOĞRU sınıfı import et
|
8 |
+
try:
|
9 |
+
from aee_core_classes_era import Proposition # Düzeltildi!
|
10 |
+
except ImportError:
|
11 |
+
print("Utils Error: Could not import Proposition class from aee_core_classes_era.py.")
|
12 |
+
Proposition = None
|
13 |
+
|
14 |
+
# ... (Fonksiyonların geri kalanı aynı) ...
|
15 |
+
def get_proposition_by_id(prop_id: str, kb: Dict[str, Proposition]) -> Optional[Proposition]:
    """Return the proposition stored under *prop_id*, or None when absent.

    Also returns None when the Proposition class failed to import.
    """
    if not Proposition:
        return None
    return kb.get(prop_id)
|
18 |
+
|
19 |
+
def get_linked_propositions(prop_id: str, kb: Dict[str, Proposition], link_type: str = 'all') -> List[Proposition]:
    """Collect the propositions linked to *prop_id*.

    link_type selects which edges to follow: 'supports', 'contradicts',
    or 'all' (default, both). Missing link ids are silently skipped and
    duplicates are returned only once, in first-seen order.
    """
    results: List[Proposition] = []
    if not Proposition:
        return results
    root = get_proposition_by_id(prop_id, kb)
    if not root:
        return results
    candidate_ids: List[str] = []
    epistemic = getattr(root, 'epistemic_data', None)
    if epistemic:
        if link_type in ('supports', 'all'):
            supports = getattr(epistemic, 'supports', None)
            if supports:
                candidate_ids.extend(supports)
        if link_type in ('contradicts', 'all'):
            contradicts = getattr(epistemic, 'contradicts', None)
            if contradicts:
                candidate_ids.extend(contradicts)
    # De-duplicate while preserving order; drop ids not present in the KB.
    seen = set()
    for candidate_id in candidate_ids:
        if candidate_id in seen:
            continue
        seen.add(candidate_id)
        linked = get_proposition_by_id(candidate_id, kb)
        if linked:
            results.append(linked)
    return results
|
37 |
+
|
38 |
+
if __name__ == "__main__":
    print("aee_utils.py loaded...") # Shortened message
    if Proposition: print("Proposition class imported successfully from aee_core_classes_era.")
    else: print("Proposition class could not be imported.")
|
aee_validator.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# aee_validator.py
|
2 |
+
# AEE Era Sürümü: Önermelerin makullüğünü/gerçekliğini kontrol eder.
|
3 |
+
|
4 |
+
from typing import Dict, List, Optional, Tuple
|
5 |
+
|
6 |
+
try:
|
7 |
+
# Era sürümündeki sınıfları import et
|
8 |
+
from aee_core_classes_era import Proposition
|
9 |
+
except ImportError:
|
10 |
+
print("Error: Could not import Proposition class from aee_core_classes_era.py.")
|
11 |
+
Proposition = None
|
12 |
+
|
13 |
+
# --- Makullük Kontrol Fonksiyonu ---
|
14 |
+
|
15 |
+
def check_plausibility_v_era(proposition: Proposition) -> Tuple[Optional[float], List[str]]:
    """Estimate how plausible a single proposition is.

    In the Era version this check is meant to consult external knowledge
    (simulated or real); for now it applies a handful of common-sense
    keyword rules and otherwise falls back to a default score.

    Returns:
        Tuple[Optional[float], List[str]]: (plausibility score in [0.0, 1.0],
        list of validation notes). A None score means the check could not run.
    """
    if not Proposition or not proposition:
        return None, ["Error: Invalid proposition input."]

    notes: List[str] = []
    score: Optional[float] = None

    # --- Era hook: this is where implicit/external knowledge would be used. ---
    # A real implementation would analyse the proposition, query an external
    # knowledge source (web search, knowledge graph, or an evaluated model),
    # and derive the score and notes from the response. Until then, keep it
    # to simple keyword heuristics:
    subject = proposition.subject_lemma
    value = proposition.value_lemma
    common_sky_colors = ["blue", "grey", "gray", "black", "red", "orange", "pink", "purple"]
    known_water_states = ["solid", "liquid", "gas", "steam", "ice"]
    if subject == "sky" and value not in common_sky_colors:
        # An unusual sky colour is suspicious, though not impossible.
        score = 0.2
        notes.append("Value is an uncommon color for the sky.")
    elif subject == "water" and value in known_water_states:
        score = 0.9
    # ... further simple common-sense rules could be added here ...
    else:
        # Default for subjects we know nothing about: slightly below certain.
        score = 0.8

    # --- End of check ---

    # Clamp into the valid range (when a score was assigned at all).
    if score is not None:
        score = max(0.0, min(1.0, score))

    # print(f"DEBUG Validator: Prop '{proposition.prop_id[:8]}' Plausibility: {plausibility_score}, Notes: {validation_notes}")
    return score, notes
|
63 |
+
|
64 |
+
|
65 |
+
# --- Test Bloğu ---
|
66 |
+
if __name__ == "__main__":
    print("Testing AEE Validator Module (Era Version)...")

    if Proposition:
        from aee_core_classes_era import EpistemicData # Needed only for this test
        print("Creating mock propositions for validator testing...")

        # Example 1: Plausible (known sky colour)
        ed1 = EpistemicData(source_id="test1")
        prop1 = Proposition("sky is blue", "sky is blue", ed1, "sky", "be", "blue")
        score1, notes1 = check_plausibility_v_era(prop1)
        print(f"\nProp: {prop1.subject_lemma} - {prop1.value_lemma}")
        print(f" Plausibility Score: {score1}, Notes: {notes1}")

        # Example 2: Not plausible (uncommon sky colour)
        ed2 = EpistemicData(source_id="test2")
        prop2 = Proposition("sky is green", "sky is green", ed2, "sky", "be", "green")
        score2, notes2 = check_plausibility_v_era(prop2)
        print(f"\nProp: {prop2.subject_lemma} - {prop2.value_lemma}")
        print(f" Plausibility Score: {score2}, Notes: {notes2}")

        # Example 3: Unknown subject (falls back to the default score)
        ed3 = EpistemicData(source_id="test3")
        prop3 = Proposition("Xyz is Fgh", "Xyz is Fgh", ed3, "xyz", "be", "fgh")
        score3, notes3 = check_plausibility_v_era(prop3)
        print(f"\nProp: {prop3.subject_lemma} - {prop3.value_lemma}")
        print(f" Plausibility Score: {score3}, Notes: {notes3}")

    else:
        print("Could not run tests because Proposition class import failed.")

    print("\nValidator module testing complete.")
|