minor update to readme
Browse files
README.md
CHANGED
|
@@ -56,30 +56,44 @@ data = ontology.extract()
|
|
| 56 |
|
| 57 |
**How to use the loaded dataset for LLM4OL Paradigm task settings?**
|
| 58 |
``` python
|
|
|
|
| 59 |
from ontolearner import GoodRelations, LearnerPipeline, train_test_split
|
| 60 |
|
|
|
|
| 61 |
ontology = GoodRelations()
|
| 62 |
-
ontology.load()
|
| 63 |
data = ontology.extract()
|
| 64 |
|
| 65 |
# Split into train and test sets
|
| 66 |
-
train_data, test_data = train_test_split(data, test_size=0.2)
|
| 67 |
|
| 68 |
-
#
|
|
|
|
| 69 |
pipeline = LearnerPipeline(
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
|
|
|
| 74 |
)
|
| 75 |
|
| 76 |
-
#
|
| 77 |
-
|
| 78 |
train_data=train_data,
|
| 79 |
test_data=test_data,
|
| 80 |
-
|
| 81 |
-
|
|
|
|
| 82 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
```
|
| 84 |
|
| 85 |
For more detailed documentation, see the [OntoLearner documentation](https://ontolearner.readthedocs.io).
|
|
|
|
| 56 |
|
| 57 |
**How to use the loaded dataset for LLM4OL Paradigm task settings?**
|
| 58 |
``` python
|
| 59 |
+
# Import core modules from the OntoLearner library
|
| 60 |
from ontolearner import GoodRelations, LearnerPipeline, train_test_split
|
| 61 |
|
| 62 |
+
# Load the GoodRelations ontology, an e-commerce vocabulary covering products, offerings, prices, and business entities
|
| 63 |
ontology = GoodRelations()
|
| 64 |
+
ontology.load() # Load entities, types, and structured term annotations from the ontology
|
| 65 |
data = ontology.extract()
|
| 66 |
|
| 67 |
# Split into train and test sets
|
| 68 |
+
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
|
| 69 |
|
| 70 |
+
# Initialize a multi-component learning pipeline (retriever + LLM)
|
| 71 |
+
# This configuration enables a Retrieval-Augmented Generation (RAG) setup
|
| 72 |
pipeline = LearnerPipeline(
|
| 73 |
+
retriever_id='sentence-transformers/all-MiniLM-L6-v2', # Dense retriever model for nearest neighbor search
|
| 74 |
+
llm_id='Qwen/Qwen2.5-0.5B-Instruct', # Lightweight instruction-tuned LLM for reasoning
|
| 75 |
+
hf_token='...', # Hugging Face token for accessing gated models
|
| 76 |
+
batch_size=32, # Batch size for training/prediction if supported
|
| 77 |
+
top_k=5 # Number of top retrievals to include in RAG prompting
|
| 78 |
)
|
| 79 |
|
| 80 |
+
# Run the pipeline: training, prediction, and evaluation in one call
|
| 81 |
+
outputs = pipeline(
|
| 82 |
train_data=train_data,
|
| 83 |
test_data=test_data,
|
| 84 |
+
evaluate=True, # Compute metrics like precision, recall, and F1
|
| 85 |
+
task='term-typing' # Specifies the task
|
| 86 |
+
# Other options: "taxonomy-discovery" or "non-taxonomy-discovery"
|
| 87 |
)
|
| 88 |
+
|
| 89 |
+
# Print final evaluation metrics
|
| 90 |
+
print("Metrics:", outputs['metrics'])
|
| 91 |
+
|
| 92 |
+
# Print the total time taken for the full pipeline execution
|
| 93 |
+
print("Elapsed time:", outputs['elapsed_time'])
|
| 94 |
+
|
| 95 |
+
# Print all outputs (including predictions)
|
| 96 |
+
print(outputs)
|
| 97 |
```
|
| 98 |
|
| 99 |
For more detailed documentation, see the [OntoLearner documentation](https://ontolearner.readthedocs.io).
|