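# Hugging Face Space that generates a random "website of the day": GPT-2 writes the
# copy, Stable Diffusion renders a topic image, DistilBERT (SST-2) scores the content
# sentiment, and randomized CSS styles the generated page.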
import spaces
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from diffusers import StableDiffusionPipeline
import torch
import numpy as np
import random
from datasets import Dataset
from huggingface_hub import HfApi
from datetime import datetime, time
from accelerate import Accelerator
from accelerate.utils import set_seed
from PIL import Image
import io
import base64

# Set a seed for reproducibility
set_seed(42)

# Initialize Accelerator
accelerator = Accelerator()

device = 0 if torch.cuda.is_available() else -1  # Use GPU 0 if available, else CPU

# Initialize models
text_generator = pipeline("text-generation", model="gpt2-medium", device=device)
image_generator = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
image_generator = image_generator.to(f"cuda:{device}" if device >= 0 else "cpu")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
sentiment_model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english"
)
sentiment_model = sentiment_model.to(f"cuda:{device}" if device >= 0 else "cpu")

# Global variables for feedback collection
feedback_data = []

# Initialize HfApi
api = HfApi()

# Define the topics list in the global scope
topics = [
    "space exploration", "artificial intelligence", "environmental conservation", "quantum computing",
    "renewable energy", "climate change", "biotechnology", "nanotechnology", "cybersecurity", "robotics",
    "virtual reality and augmented reality", "autonomous vehicles", "genetic engineering",
    "3D printing and additive manufacturing", "internet of things (IoT)", "blockchain technology",
    "sustainable agriculture", "smart cities", "digital health and telemedicine",
    "renewable energy storage solutions", "advanced materials science", "machine learning and data science",
    "oceanography and marine conservation", "AI ethics and governance", "futuristic urban planning",
    "human-computer interaction", "sustainable architecture", "nuclear fusion research",
    "environmental impact of technology", "bioinformatics", "synthetic biology",
    "renewable energy grid integration", "environmental policy and legislation",
    "human augmentation and enhancement", "climate engineering (geoengineering)",
    "high-performance computing (HPC)", "sustainable transportation", "energy harvesting technologies",
    "cognitive computing", "deep learning and neural networks", "zero-waste living",
    "environmental education and awareness", "sustainable water management", "green technology innovation",
    "impact of AI on employment", "ethical hacking", "personalized medicine",
    "advanced prosthetics and bionics", "circular economy", "environmental justice",
    "human-robot collaboration", "artificial intelligence in art and creativity", "AI in climate modeling",
    "renewable energy microgrids", "sustainable fashion", "quantum cryptography",
    "energy-efficient computing", "wildlife conservation and habitat preservation",
    "genomic editing and CRISPR", "big data analytics", "ethics in genetic engineering",
    "sustainable fisheries and aquaculture", "urban resilience to climate change",
    "AI in healthcare diagnostics", "eco-friendly packaging solutions", "AI in financial markets",
    "conservation technology", "green building certifications (e.g., LEED)", "AI-powered drug discovery",
    "sustainable mining practices", "remote sensing for environmental monitoring",
    "conservation of endangered species", "AI in supply chain optimization",
"sustainable product design", "regenerative agriculture", "quantum teleportation", "sustainable tourism", "AI-driven autonomous systems", "carbon capture and storage (CCS)", "resilient infrastructure development", "sustainable energy policies", "AI in language translation", "sustainable waste management", "advanced robotics in manufacturing", "precision agriculture", "smart grid technology", "biomimicry in engineering", "ethical AI development", "neurotechnology", "urban vertical farming", "quantum sensors", "blockchain in supply chain transparency", "AI in education and personalized learning", "sustainable fashion and textile innovation", "green chemistry", "smart home technology", "AI in cybersecurity", "sustainable packaging solutions", "edge computing", "autonomous drones in various industries", "AI in music composition", "sustainable urban mobility", "quantum machine learning", "bioplastics and biodegradable materials", "AI in weather forecasting", "sustainable construction materials", "brain-computer interfaces", "AI in legal tech and justice systems", "sustainable food systems", "quantum computing in finance", "smart wearable technology", "AI in agriculture (precision farming)", "sustainable aviation fuels", "advanced recycling technologies", "AI in content creation and journalism", "sustainable concrete alternatives", "quantum sensing in healthcare", "AI in customer service and chatbots", "sustainable urban water systems", "bioengineered organs", "AI in predictive maintenance", "sustainable packaging design", "quantum computing in drug discovery", "AI-powered personal assistants", "sustainable forestry practices", "next-generation batteries", "AI in sports analytics and training", "sustainable textile production", "quantum metrology", "AI in disaster prediction and management", "sustainable refrigeration technologies", "neuromorphic computing", "AI in wildlife conservation", "sustainable desalination techniques", "quantum-resistant cryptography", "AI in urban planning and design", "sustainable coffee production", "advanced materials for energy storage", "AI in mental health support", "sustainable chocolate production", "topological quantum computing", "AI in archaeological discoveries", "sustainable livestock management", "perovskite solar cells", "AI in air quality monitoring and improvement", "sustainable paper and pulp production" ] def set_sleep_time(): sleep_start = time(hour=2, minute=0) sleep_end = time(hour=6, minute=0) try: api.set_space_sleep_time( repo_id="Oranblock/Websitem", # Replace with your actual Space name sleep_start_time=sleep_start, sleep_end_time=sleep_end, timezone="UTC" ) return "Sleep time set successfully" except Exception as e: return f"Error setting sleep time: {str(e)}" @spaces.GPU @torch.no_grad() def generate_text(prompt): return text_generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text'] @spaces.GPU @torch.no_grad() def generate_image(prompt): with torch.autocast("cuda" if device >= 0 else "cpu"): image = image_generator(prompt, guidance_scale=7.5).images[0] return image @spaces.GPU @torch.no_grad() def analyze_sentiment(text): inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(f"cuda:{device}" if device >= 0 else "cpu") outputs = sentiment_model(**inputs) probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1) return probabilities.cpu().numpy()[0] def generate_random_css(): font_families = ['Arial', 'Helvetica', 'Verdana', 'Georgia', 'Palatino', 'Garamond', 'Bookman', 'Comic Sans MS', 
                     'Trebuchet MS', 'Arial Black']
    # Randomize basic style properties so every generated page looks different
    css = {
        'font_family': random.choice(font_families),
        'font_size': f'{random.randint(14, 24)}px',
        'background_color1': "#{:06x}".format(random.randint(0, 0xFFFFFF)),
        'background_color2': "#{:06x}".format(random.randint(0, 0xFFFFFF)),
        'text_color': "#{:06x}".format(random.randint(0, 0xFFFFFF)),
        'border_radius': f'{random.randint(0, 20)}px',
        'padding': f'{random.randint(10, 30)}px',
        'margin': f'{random.randint(10, 30)}px',
        'box_shadow': f'{random.randint(0, 10)}px {random.randint(0, 10)}px {random.randint(0, 20)}px rgba(0,0,0,{random.uniform(0.1, 0.5):.1f})',
        'transform': f'rotate({random.uniform(-5, 5):.2f}deg)',
        'animation_duration': f'{random.uniform(0.5, 2):.1f}s',
    }
    return css


def generate_website_content():
    topic = random.choice(topics)
    title = generate_text(f"A unique website title about {topic}:").split(':')[-1].strip()
    main_content = generate_text(f"A short paragraph about {topic}:").split(':')[-1].strip()
    tab1_content = generate_text(f"Interesting facts about {topic}:").split(':')[-1].strip()
    tab2_content = generate_text(f"Future prospects of {topic}:").split(':')[-1].strip()
    tab3_content = generate_text(f"How {topic} impacts our daily lives:").split(':')[-1].strip()
    image = generate_image(f"An artistic representation of {topic}")
    sentiment = analyze_sentiment(main_content)
    # SST-2 probabilities are ordered [negative, positive]
    sentiment_label = "Positive" if sentiment[1] > sentiment[0] else "Negative"
    css = generate_random_css()
    return title, main_content, tab1_content, tab2_content, tab3_content, image, sentiment_label, css


def update_website():
    title, main_content, tab1, tab2, tab3, image, sentiment, css = generate_website_content()
    # Encode the generated image as base64 so it can be embedded inline in the HTML
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    html_content = f"""
{main_content}
{tab1}
{tab2}
{tab3}
Content Sentiment: {sentiment}