|
# Advanced Features

LLMPromptKit provides several advanced features for sophisticated prompt engineering.

## Advanced Templating

LLMPromptKit's templating system goes beyond simple variable substitution, offering conditionals and loops.
|
|
|
### Basic Variable Substitution

```python
from llmpromptkit import PromptTemplate

# Simple variable substitution
template = PromptTemplate("Hello, {name}!")
rendered = template.render(name="John")
# Result: "Hello, John!"
```
|
|
|
### Conditional Logic

```python
# Conditionals
template = PromptTemplate("""
{if is_formal}
Dear {name},

I hope this message finds you well.
{else}
Hey {name}!
{endif}

{message}
""")

formal = template.render(is_formal=True, name="Dr. Smith", message="Please review the attached document.")
casual = template.render(is_formal=False, name="Bob", message="Want to grab lunch?")
```
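
With `is_formal=True`, the formal branch is kept, so `formal` renders approximately as below (exact blank-line handling depends on how the engine strips directive lines):

```
Dear Dr. Smith,

I hope this message finds you well.

Please review the attached document.
```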
|
|
|
### Loops

```python
# Loops
template = PromptTemplate("""
Here are your tasks:

{for task in tasks}
- {task.priority}: {task.description}
{endfor}
""")

rendered = template.render(tasks=[
    {"priority": "High", "description": "Complete the report"},
    {"priority": "Medium", "description": "Schedule meeting"},
    {"priority": "Low", "description": "Organize files"}
])
```
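
Rendered against the task list above, the output is approximately:

```
Here are your tasks:

- High: Complete the report
- Medium: Schedule meeting
- Low: Organize files
```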
|
|
|
### Nested Structures

```python
# Combining loops and conditionals
template = PromptTemplate("""
{system_message}

{for example in examples}
User: {example.input}
{if example.has_reasoning}
Reasoning: {example.reasoning}
{endif}
Assistant: {example.output}
{endfor}

User: {query}
Assistant:
""")

rendered = template.render(
    system_message="You are a helpful assistant.",
    examples=[
        {
            "input": "What's 2+2?",
            "has_reasoning": True,
            "reasoning": "Adding 2 and 2 gives 4",
            "output": "4"
        },
        {
            "input": "Hello",
            "has_reasoning": False,
            "output": "Hi there! How can I help you today?"
        }
    ],
    query="What's the capital of France?"
)
```
|
|
|
## Custom Evaluation Metrics

You can create custom metrics to evaluate prompt outputs based on your specific requirements.
|
|
|
### Creating a Custom Metric

```python
from llmpromptkit import EvaluationMetric

class RelevanceMetric(EvaluationMetric):
    """Evaluates relevance of output to a given set of topics."""

    def __init__(self, topics):
        super().__init__("relevance", "Evaluates relevance to specified topics")
        self.topics = topics

    def compute(self, generated_output, expected_output=None, **kwargs):
        """
        Compute relevance score based on topic presence.
        Returns a float between 0 and 1.
        """
        score = 0
        output_lower = generated_output.lower()

        # Count how many of the configured topics appear in the output
        for topic in self.topics:
            if topic.lower() in output_lower:
                score += 1

        # Normalize to 0-1 range
        return min(1.0, score / len(self.topics)) if self.topics else 0.0
```
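
Because `compute` is an ordinary method, you can sanity-check a metric by hand before registering it:

```python
metric = RelevanceMetric(["climate", "warming"])

# Both topics present -> full score
print(metric.compute("Global warming is changing the climate."))  # 1.0

# One of two topics present -> partial score
print(metric.compute("The climate is shifting."))  # 0.5
```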
|
|
|
### Using Custom Metrics

```python
from llmpromptkit import Evaluator, PromptManager

# Initialize components
prompt_manager = PromptManager()
evaluator = Evaluator(prompt_manager)

# Register custom metric
climate_relevance = RelevanceMetric(["climate", "temperature", "warming", "environment"])
evaluator.register_metric(climate_relevance)

# Use in evaluation
async def my_llm(prompt, vars):
    # Call your LLM API here
    return "Climate change is causing global temperature increases..."

results = await evaluator.evaluate_prompt(
    prompt_id="abc123",
    inputs=[{"topic": "climate change"}],
    llm_callback=my_llm,
    metric_names=["relevance"]  # Use our custom metric
)

print(f"Relevance score: {results['aggregated_metrics']['relevance']}")
```
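
The `compute` signature above also accepts `expected_output`, so the same interface supports reference-based scoring. A minimal exact-match sketch (illustrative, not part of the library):

```python
from llmpromptkit import EvaluationMetric

class ExactMatchMetric(EvaluationMetric):
    """Scores 1.0 when the output matches the reference exactly."""

    def __init__(self):
        super().__init__("exact_match", "Exact match against expected output")

    def compute(self, generated_output, expected_output=None, **kwargs):
        # No reference provided -> nothing to match against
        if expected_output is None:
            return 0.0
        return 1.0 if generated_output.strip() == expected_output.strip() else 0.0
```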
|
|
|
## Customizing Storage

LLMPromptKit allows you to customize where and how prompts and related data are stored.
|
|
|
### Custom Storage Locations

```python
# Specify a custom storage location
prompt_manager = PromptManager("/path/to/my/prompts")

# Export/import prompts
import json

# Export a prompt to a file
prompt = prompt_manager.get("abc123")
with open("exported_prompt.json", "w") as f:
    json.dump(prompt.to_dict(), f, indent=2)

# Import a prompt from a file
with open("exported_prompt.json", "r") as f:
    data = json.load(f)
imported_prompt = prompt_manager.import_prompt(data)
```
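
The same `to_dict`/`import_prompt` pair can also move a prompt between two stores directly; a sketch (the paths and prompt ID are placeholders):

```python
from llmpromptkit import PromptManager

# Copy a prompt from one store to another without an intermediate file
source = PromptManager("/path/to/source/prompts")
target = PromptManager("/path/to/target/prompts")

prompt = source.get("abc123")  # placeholder prompt ID
copied = target.import_prompt(prompt.to_dict())
```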
|
|
|
## LLM Integration

LLMPromptKit is designed to work with any LLM through callback functions. Here are examples of integrating with popular LLM APIs.
|
|
|
### OpenAI Integration

```python
from openai import AsyncOpenAI
from llmpromptkit import PromptManager, PromptTesting

prompt_manager = PromptManager()
testing = PromptTesting(prompt_manager)

# Configure the OpenAI client (openai>=1.0)
client = AsyncOpenAI(api_key="your-api-key")

# OpenAI callback function
async def openai_callback(prompt, vars):
    response = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=150
    )
    return response.choices[0].message.content

# Run tests with OpenAI
test_results = await testing.run_all_tests("abc123", openai_callback)
```
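
Real API calls fail intermittently, so you may want a thin retry wrapper around any callback. A sketch with illustrative retry/delay values (narrow the `except` to your SDK's error types in practice):

```python
import asyncio

async def with_retries(callback, prompt, vars, retries=3, delay=1.0):
    """Retry an LLM callback with linear backoff."""
    for attempt in range(retries):
        try:
            return await callback(prompt, vars)
        except Exception:
            # Out of attempts: surface the error to the caller
            if attempt == retries - 1:
                raise
            await asyncio.sleep(delay * (attempt + 1))
```

Pass it along as `lambda p, v: with_retries(openai_callback, p, v)` wherever a callback is expected.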
|
|
|
### Anthropic Integration

```python
import anthropic
from llmpromptkit import PromptManager, Evaluator

prompt_manager = PromptManager()
evaluator = Evaluator(prompt_manager)

# Configure the async Anthropic client
client = anthropic.AsyncAnthropic(api_key="your-api-key")

# Anthropic callback function
async def anthropic_callback(prompt, vars):
    response = await client.messages.create(
        model="claude-3-5-sonnet-20241022",  # any current Claude model
        messages=[{"role": "user", "content": prompt}],
        max_tokens=150
    )
    return response.content[0].text

# Evaluate with Anthropic
eval_results = await evaluator.evaluate_prompt(
    prompt_id="abc123",
    inputs=[{"query": "What is machine learning?"}],
    llm_callback=anthropic_callback
)
```
|
|
|
### Hugging Face Integration

```python
from transformers import pipeline
import asyncio
from llmpromptkit import PromptManager, VersionControl

prompt_manager = PromptManager()
version_control = VersionControl(prompt_manager)

# Set up a Hugging Face pipeline
generator = pipeline('text-generation', model='gpt2')

# Hugging Face callback function
async def hf_callback(prompt, vars):
    # The pipeline is synchronous; run it in an executor so it
    # doesn't block the asyncio event loop
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(
        None,
        lambda: generator(prompt, max_length=100)[0]['generated_text']
    )
    return result

# Use with version control
prompt = prompt_manager.create(
    content="Complete this: {text}",
    name="Text Completion"
)
version_control.commit(prompt.id, "Initial version")

# Test with different models by swapping the callback
```
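
The snippets in this section use top-level `await` for brevity; in a plain script, drive the coroutines with `asyncio.run`. A minimal driver reusing `hf_callback` from above:

```python
import asyncio

async def main():
    # Call the callback directly with a rendered prompt and empty vars
    output = await hf_callback("Complete this: The sky is", {})
    print(output)

asyncio.run(main())
```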
|
|