minor update to readme
Browse files
README.md
CHANGED
@@ -57,30 +57,44 @@ data = ontology.extract()
|
|
57 |
|
58 |
**How to use the loaded dataset for LLM4OL Paradigm task settings?**
|
59 |
``` python
|
|
|
60 |
from ontolearner import CopyrightOnto, LearnerPipeline, train_test_split
|
61 |
|
|
|
62 |
ontology = CopyrightOnto()
|
63 |
-
ontology.load()
|
64 |
data = ontology.extract()
|
65 |
|
66 |
# Split into train and test sets
|
67 |
-
train_data, test_data = train_test_split(data, test_size=0.2)
|
68 |
|
69 |
-
#
|
|
|
70 |
pipeline = LearnerPipeline(
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
75 |
)
|
76 |
|
77 |
-
#
|
78 |
-
|
79 |
train_data=train_data,
|
80 |
test_data=test_data,
|
81 |
-
|
82 |
-
|
|
|
83 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
84 |
```
|
85 |
|
86 |
For more detailed documentation, see the [OntoLearner documentation](https://ontolearner.readthedocs.io)
|
|
|
57 |
|
58 |
**How to use the loaded dataset for LLM4OL Paradigm task settings?**
|
59 |
``` python
|
60 |
+
# Import core modules from the OntoLearner library
|
61 |
from ontolearner import CopyrightOnto, LearnerPipeline, train_test_split
|
62 |
|
63 |
+
# Load the CopyrightOnto ontology, which contains concepts related to copyright, licensing, and intellectual-property rights
|
64 |
ontology = CopyrightOnto()
|
65 |
+
ontology.load() # Load entities, types, and structured term annotations from the ontology
|
66 |
data = ontology.extract()
|
67 |
|
68 |
# Split into train and test sets
|
69 |
+
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
|
70 |
|
71 |
+
# Initialize a multi-component learning pipeline (retriever + LLM)
|
72 |
+
# This configuration enables a Retrieval-Augmented Generation (RAG) setup
|
73 |
pipeline = LearnerPipeline(
|
74 |
+
retriever_id='sentence-transformers/all-MiniLM-L6-v2', # Dense retriever model for nearest neighbor search
|
75 |
+
llm_id='Qwen/Qwen2.5-0.5B-Instruct', # Lightweight instruction-tuned LLM for reasoning
|
76 |
+
hf_token='...', # Hugging Face token for accessing gated models
|
77 |
+
batch_size=32, # Batch size for training/prediction if supported
|
78 |
+
top_k=5 # Number of top retrievals to include in RAG prompting
|
79 |
)
|
80 |
|
81 |
+
# Run the pipeline: training, prediction, and evaluation in one call
|
82 |
+
outputs = pipeline(
|
83 |
train_data=train_data,
|
84 |
test_data=test_data,
|
85 |
+
evaluate=True, # Compute metrics like precision, recall, and F1
|
86 |
+
task='term-typing' # Specifies the task
|
87 |
+
# Other options: "taxonomy-discovery" or "non-taxonomy-discovery"
|
88 |
)
|
89 |
+
|
90 |
+
# Print final evaluation metrics
|
91 |
+
print("Metrics:", outputs['metrics'])
|
92 |
+
|
93 |
+
# Print the total time taken for the full pipeline execution
|
94 |
+
print("Elapsed time:", outputs['elapsed_time'])
|
95 |
+
|
96 |
+
# Print all outputs (including predictions)
|
97 |
+
print(outputs)
|
98 |
```
|
99 |
|
100 |
For more detailed documentation, see the [OntoLearner documentation](https://ontolearner.readthedocs.io)
|