Model Card for Model ID

This model card serves as a base template for new models. It has been generated using this raw template.

Model Details

Training Details

Training Data

The model was trained on the HumanLLMs/Human-Like-DPO-Dataset dataset.

Training Procedure

The model is trained using a simplified implementation of SPIN (Self-Play Fine-Tuning), in which the model is trained to assign higher likelihood to real data than a frozen copy of its previous iterate does.

Load and Preprocess the Dataset

import json
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch.nn.functional as F
from torch.optim import AdamW

class DPODataset(Dataset):
    """Dataset of (prompt, chosen, rejected) triples tokenized for DPO/SPIN training.

    Each entry of the JSON file at *data_path* must be a dict with string
    fields 'prompt', 'chosen', and 'rejected'. All three fields are tokenized
    eagerly in ``__init__`` and stored; ``__getitem__`` returns the cached
    dict of tokenizer outputs.
    """

    def __init__(self, data_path, tokenizer, max_length=512):
        with open(data_path, 'r') as file:
            data = json.load(file)

        self.examples = []
        for entry in data:
            prompt = entry['prompt']
            chosen = entry['chosen']
            rejected = entry['rejected']

            # Tokenize prompt, chosen, and rejected. Pad to a fixed length so
            # the default DataLoader collate_fn can stack examples into a
            # batch — unpadded variable-length tensors raise at batching time.
            tokenized_prompt = tokenizer(prompt, truncation=True, max_length=max_length,
                                         padding="max_length", return_tensors="pt")
            tokenized_chosen = tokenizer(chosen, truncation=True, max_length=max_length,
                                         padding="max_length", return_tensors="pt")
            tokenized_rejected = tokenizer(rejected, truncation=True, max_length=max_length,
                                          padding="max_length", return_tensors="pt")

            # Store in examples
            self.examples.append({
                "prompt": tokenized_prompt,
                "chosen": tokenized_chosen,
                "rejected": tokenized_rejected
            })

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        return self.examples[idx]

# Assuming you have a tokenizer called 'tokenizer'
dpo_dataset = DPODataset('dpo_dataset_16_09_2024_more_info_convs_2k.json', tokenizer)
# Wrap the dataset just built; the original passed the undefined name
# 'dataset', which raises NameError at runtime.
dataloader = DataLoader(dpo_dataset, batch_size=3, shuffle=True)

Define the SPIN Loss Function

def spin_loss(model, opponent, inputs):
    """Compute a simplified SPIN loss for one batch of real (human) data.

    SPIN trains the model to assign *higher* likelihood to real data than a
    frozen opponent (the previous iterate) does. The loss is therefore the
    negated per-token log-probability margin — minimizing it widens the gap
    in the model's favor. (The original returned the un-negated margin,
    which pushes the model's likelihood of the data *down*.)

    Args:
        model: trainable causal LM; called as ``model(**inputs, labels=...)``.
        opponent: frozen copy of the model; evaluated under ``no_grad``.
        inputs: dict with at least ``input_ids``; ``attention_mask``, when
            present, is used to exclude padding tokens from the loss.

    Returns:
        Scalar loss tensor (negative when the model beats the opponent).
    """
    outputs = model(**inputs, labels=inputs["input_ids"])
    log_probs = F.log_softmax(outputs.logits, dim=-1)
    true_log_probs = torch.gather(log_probs, -1, inputs["input_ids"].unsqueeze(-1)).squeeze(-1)

    with torch.no_grad():
        opponent_outputs = opponent(**inputs, labels=inputs["input_ids"])
        opponent_log_probs = F.log_softmax(opponent_outputs.logits, dim=-1)
        opponent_true_log_probs = torch.gather(opponent_log_probs, -1, inputs["input_ids"].unsqueeze(-1)).squeeze(-1)

    margin = true_log_probs - opponent_true_log_probs
    mask = inputs.get("attention_mask")
    if mask is not None:
        # Average only over real tokens; padding contributes nothing.
        loss = -(margin * mask).sum() / mask.sum().clamp(min=1)
    else:
        loss = -margin.mean()
    return loss

Training Loop

num_epochs = 10
learning_rate = 0.0002

# Checkpoint identifier, used for both the trainable model and its opponent
# copy. (The original mixed the string literal "model_name" with an undefined
# variable `model_name`, which raises NameError.)
model_name = "model_name"

# Load model
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the opponent model as a frozen snapshot of the current policy.
opponent = AutoModelForCausalLM.from_pretrained(model_name)
opponent.load_state_dict(model.state_dict())
opponent.eval()  # only ever evaluated under no_grad inside spin_loss

optimizer = AdamW(model.parameters(), lr=learning_rate)

model.train()
for epoch in range(num_epochs):
    for batch in dataloader:
        # Train on the human ("chosen") responses: SPIN contrasts real data
        # against the opponent's own distribution. NOTE(review): the original
        # iterated the top-level batch dict, whose values are *nested* dicts
        # of tensors, so `val.squeeze(1)` raised TypeError. The collated
        # tensors have shape (batch, 1, seq); squeeze the singleton dim.
        inputs = {key: val.squeeze(1).to(model.device) for key, val in batch["chosen"].items()}

        # Compute SPIN loss
        loss = spin_loss(model, opponent, inputs)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Update the opponent model to the latest policy after each epoch.
    opponent.load_state_dict(model.state_dict())
    print(f"Epoch {epoch + 1}/{num_epochs} completed. Loss: {loss.item()}")

Save the Fine-Tuned Model

# Persist the fine-tuned weights and the matching tokenizer to one directory
# so both can be reloaded later with from_pretrained on the same path.
model.save_pretrained("fine_tuned_dpo_neo_spin")
tokenizer.save_pretrained("fine_tuned_dpo_neo_spin")

Usage

To use the fine-tuned model:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Reload the fine-tuned checkpoint and its tokenizer from disk.
model_path = "fine_tuned_dpo_neo_spin"
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

def generate_response(prompt):
    """Return the model's decoded reply to *prompt* as plain text."""
    encoded = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(**encoded)
    reply = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return reply

print(generate_response("Hello, how can I help you?"))

Citation

If you use this model, please cite:

@misc{model_id,
  author = {Your Name},
  title = {Model ID},
  year = {2025},
  url = {https://huggingface.co/your-model-id}
}
Downloads last month
19
Safetensors
Model size
1.1B params
Tensor type
F32
·
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for mrcuddle/Tiny-DarkLlama-DPO-SPIN-Implementation

Finetuned
(3)
this model
Quantizations
1 model

Dataset used to train mrcuddle/Tiny-DarkLlama-DPO-SPIN-Implementation