# YUNAgpt / Gpt
# yukimama's picture
# Upload 13 files
# 4c3301f verified
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the pre-trained models and tokenizers.
def _load_pair(name):
    """Return the (model, tokenizer) pair for the checkpoint *name*."""
    model = GPT2LMHeadModel.from_pretrained(name)
    tokenizer = GPT2Tokenizer.from_pretrained(name)
    return model, tokenizer


wormgpt_model, wormgpt_tokenizer = _load_pair("wormgpt")
fraudgpt_model, fraudgpt_tokenizer = _load_pair("fraudgpt")
xxxgpt_model, xxxgpt_tokenizer = _load_pair("xxxgpt")
evilgpt_model, evilgpt_tokenizer = _load_pair("evilgpt")
def generate_text(prompt, model, tokenizer, max_length=50):
    """Generate a continuation of *prompt* with the given model/tokenizer pair.

    Args:
        prompt: Text to condition generation on.
        model: A causal LM exposing ``generate`` (e.g. ``GPT2LMHeadModel``).
        tokenizer: The matching tokenizer exposing ``encode``/``decode``.
        max_length: Total length cap passed to ``generate`` — note this
            counts prompt tokens plus generated tokens, not new tokens only.

    Returns:
        The decoded text of the single generated sequence, with special
        tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Inference only: disable autograd bookkeeping to save memory and time.
    with torch.no_grad():
        output = model.generate(input_ids, max_length=max_length, num_return_sequences=1)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
# Run the same prompt through every loaded model and combine the answers.
def generate_uncensored_text(prompt, max_length=50):
    """Run *prompt* through all four loaded models.

    Returns the four generations concatenated in load order (wormgpt,
    fraudgpt, xxxgpt, evilgpt), separated by newlines.
    """
    pairs = (
        (wormgpt_model, wormgpt_tokenizer),
        (fraudgpt_model, fraudgpt_tokenizer),
        (xxxgpt_model, xxxgpt_tokenizer),
        (evilgpt_model, evilgpt_tokenizer),
    )
    return "\n".join(generate_text(prompt, m, t, max_length) for m, t in pairs)
# Example usage — guarded so importing this module does not trigger generation.
if __name__ == "__main__":
    prompt = "I want to generate some uncensored text."
    uncensored_text = generate_uncensored_text(prompt)
    print(uncensored_text)