# Integration Examples

This document provides concrete examples of integrating LLMPromptKit into various applications and workflows.

## Customer Support Chatbot

### Setup

```python
from llmpromptkit import PromptManager, VersionControl
import openai

# Initialize components
prompt_manager = PromptManager()
version_control = VersionControl(prompt_manager)

# Create prompt templates for different scenarios
greeting_prompt = prompt_manager.create(
    content="You are a helpful customer service agent for {company_name}. Greet the customer politely.",
    name="Customer Greeting",
    tags=["customer-service", "greeting"]
)

inquiry_prompt = prompt_manager.create(
    content="""
You are a helpful customer service agent for {company_name}.

Customer inquiry: {customer_message}

Based on this inquiry:
1. Identify the main issue
2. Provide a helpful response
3. Offer additional assistance

Keep your tone professional but friendly.
""",
    name="Customer Inquiry Response",
    tags=["customer-service", "inquiry"]
)

# Version them
version_control.commit(greeting_prompt.id, "Initial version")
version_control.commit(inquiry_prompt.id, "Initial version")

# OpenAI callback
def generate_response(prompt_text):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt_text}]
    )
    return response.choices[0].message.content

# Main handler function
def handle_customer_message(customer_name, message, is_new_conversation):
    if is_new_conversation:
        # Use greeting prompt for new conversations
        prompt = prompt_manager.get(greeting_prompt.id)
        prompt_text = prompt.render(company_name="Acme Inc.")
        return generate_response(prompt_text)
    else:
        # Use inquiry prompt for ongoing conversations
        prompt = prompt_manager.get(inquiry_prompt.id)
        prompt_text = prompt.render(
            company_name="Acme Inc.",
            customer_message=message
        )
        return generate_response(prompt_text)
```
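A minimal driver sketch, assuming the setup above has already run and the `openai` library can find an API key (e.g., via the `OPENAI_API_KEY` environment variable). The `seen_customers` set is a hypothetical stand-in for whatever session tracking your application already uses:

```python
# Hypothetical in-memory session store; replace with real conversation state.
seen_customers = set()

def on_incoming_message(customer_name, message):
    # Treat the first message from a customer as a new conversation
    is_new = customer_name not in seen_customers
    seen_customers.add(customer_name)
    return handle_customer_message(customer_name, message, is_new)

print(on_incoming_message("Dana", "Hi, I have a question about my order."))
print(on_incoming_message("Dana", "Order #1234 arrived damaged."))
```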
## Content Generation System

### Setup

```python
from llmpromptkit import PromptManager, PromptTesting, Evaluator
import asyncio

# Initialize components
prompt_manager = PromptManager("content_system_prompts")
testing = PromptTesting(prompt_manager)
evaluator = Evaluator(prompt_manager)

# Create content generation prompt
blog_prompt = prompt_manager.create(
    content="""
Write a blog post about {topic}.

Title: {title}

The post should:
- Be approximately {word_count} words
- Be written in a {tone} tone
- Include {num_sections} main sections
- Target audience: {audience}
- Include a compelling call-to-action at the end

Keywords to include: {keywords}
""",
    name="Blog Post Generator",
    tags=["content", "blog"]
)

# Test cases
test_case = testing.create_test_case(
    prompt_id=blog_prompt.id,
    input_vars={
        "topic": "Sustainable Living",
        "title": "10 Simple Ways to Reduce Your Carbon Footprint",
        "word_count": "800",
        "tone": "informative yet casual",
        "num_sections": "5",
        "audience": "environmentally-conscious millennials",
        "keywords": "sustainability, eco-friendly, carbon footprint, climate change, lifestyle changes"
    }
)

# LLM callback
async def content_llm_callback(prompt, vars):
    # Call your preferred LLM API here
    # This is a placeholder
    return f"Generated content about {vars.get('topic', 'unknown topic')}"

# Content generation function
async def generate_content(content_type, parameters):
    if content_type == "blog":
        prompt = prompt_manager.get(blog_prompt.id)
        rendered_prompt = prompt.render(**parameters)

        # Generate content
        content = await content_llm_callback(rendered_prompt, parameters)

        # Evaluate quality
        evaluation = await evaluator.evaluate_prompt(
            prompt_id=blog_prompt.id,
            inputs=[parameters],
            llm_callback=content_llm_callback
        )

        quality_score = evaluation["aggregated_metrics"].get("length", 0)

        return {
            "content": content,
            "quality_score": quality_score,
            "metadata": {
                "prompt_id": blog_prompt.id,
                "prompt_version": prompt.version,
                "parameters": parameters
            }
        }
    else:
        raise ValueError(f"Unsupported content type: {content_type}")
```
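Since `generate_content` is a coroutine, callers need to run it in an event loop. A minimal usage sketch with illustrative parameter values (the keys match the `blog_prompt` placeholders above):

```python
async def main():
    result = await generate_content("blog", {
        "topic": "Sustainable Living",
        "title": "10 Simple Ways to Reduce Your Carbon Footprint",
        "word_count": "800",
        "tone": "informative yet casual",
        "num_sections": "5",
        "audience": "environmentally-conscious millennials",
        "keywords": "sustainability, eco-friendly, carbon footprint"
    })
    print(f"Quality score: {result['quality_score']}")
    print(result["content"][:200])  # preview the generated post

asyncio.run(main())
```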
## AI-Assisted Research Tool

### Setup

```python
from llmpromptkit import PromptManager, VersionControl
import datetime
import json
import openai

# Initialize components
prompt_manager = PromptManager("research_prompts")
version_control = VersionControl(prompt_manager)

# Create research prompts
article_summary_prompt = prompt_manager.create(
    content="""
Summarize the following research article:

Title: {article_title}

Abstract: {article_abstract}

Provide a summary that:
1. Identifies the main research question
2. Outlines the methodology
3. Summarizes key findings
4. Highlights limitations
5. Explains the significance of the results

Keep the summary concise, approximately 250 words.
""",
    name="Article Summarizer",
    tags=["research", "summary"]
)

research_question_prompt = prompt_manager.create(
    content="""
Based on the following information:

Research Area: {research_area}
Existing Knowledge: {existing_knowledge}
Observed Gap: {knowledge_gap}

Generate 5 potential research questions that:
1. Address the identified knowledge gap
2. Are specific and answerable
3. Have theoretical or practical significance
4. Can be investigated with available research methods
""",
    name="Research Question Generator",
    tags=["research", "question-generation"]
)

# Version control
version_control.commit(article_summary_prompt.id, "Initial version")
version_control.commit(research_question_prompt.id, "Initial version")

# OpenAI callback
def research_assistant(prompt_text):
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt_text}]
    )
    return response.choices[0].message.content

# Research functions
def summarize_article(article_title, article_abstract):
    prompt = prompt_manager.get(article_summary_prompt.id)
    prompt_text = prompt.render(
        article_title=article_title,
        article_abstract=article_abstract
    )
    return research_assistant(prompt_text)

def generate_research_questions(research_area, existing_knowledge, knowledge_gap):
    prompt = prompt_manager.get(research_question_prompt.id)
    prompt_text = prompt.render(
        research_area=research_area,
        existing_knowledge=existing_knowledge,
        knowledge_gap=knowledge_gap
    )
    return research_assistant(prompt_text)

# Save results
def save_research_data(research_project, data_type, content):
    # Save the data along with prompt metadata for reproducibility
    if data_type == "summary":
        prompt_id = article_summary_prompt.id
        prompt = prompt_manager.get(prompt_id)
    elif data_type == "questions":
        prompt_id = research_question_prompt.id
        prompt = prompt_manager.get(prompt_id)
    else:
        raise ValueError(f"Unsupported data type: {data_type}")

    research_data = {
        "content": content,
        "metadata": {
            "prompt_id": prompt_id,
            "prompt_version": prompt.version,
            "timestamp": datetime.datetime.now().isoformat()
        }
    }

    # Save to file (a real application might save to a database)
    with open(f"{research_project}_{data_type}.json", "w") as f:
        json.dump(research_data, f, indent=2)
```

## Educational Quiz Generator

### Setup

```python
from llmpromptkit import PromptManager, PromptTemplate
import asyncio
import aiohttp

# Initialize components
prompt_manager = PromptManager("education_prompts")

# Quiz generation prompt
quiz_prompt = prompt_manager.create(
    content="""
Generate a quiz on the topic of {topic} at a {difficulty_level} difficulty level.

The quiz should:
- Have {num_questions} multiple-choice questions
- Cover the following subtopics: {subtopics}
- Include {include_explanation} explanations for the correct answers
- Be appropriate for {grade_level} students

For each question, provide:
1. The question text
2. Four possible answers (A, B, C, D)
3. The correct answer
{if include_explanation == "yes"}
4. An explanation of why the answer is correct
{endif}

Format the output as valid JSON.
""",
    name="Quiz Generator",
    tags=["education", "quiz"]
)

# Quiz rendering template using advanced templating
render_template = PromptTemplate("""