"""
Free H200 Training Script for Nano-Coder
Optimized for HF's free 4-minute daily H200 access
"""
import os
import time
import math
import pickle
from contextlib import nullcontext
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
from model import GPTConfig, GPT
# Hugging Face specific imports
from huggingface_hub import HfApi, login
import wandb
# -----------------------------------------------------------------------------
# Configuration optimized for FREE H200 (4 minutes daily)
# I/O
out_dir = 'out-nano-coder-free'
eval_interval = 50 # Very frequent evaluation for short runs
log_interval = 2
eval_iters = 10 # Fewer eval iterations
eval_only = False
always_save_checkpoint = True
init_from = 'scratch'
# wandb logging - enabled for HF
wandb_log = True
wandb_project = 'nano-coder-free'
wandb_run_name = 'nano-coder-h200-free'
# data
dataset = 'python-codes-25k'
gradient_accumulation_steps = 1 * 8 # 8 micro-steps per optimizer step; kept small for a single H200
batch_size = 64 # Larger batch size for H200 efficiency
block_size = 512 # Smaller context for faster training
# model - smaller for free tier
n_layer = 6 # Reduced from 12
n_head = 6 # Reduced from 12
n_embd = 384 # Reduced from 768
dropout = 0.1
bias = False
# optimizer settings tuned for short H200 runs
learning_rate = 1e-3 # Higher learning rate for faster convergence
max_iters = 1000 # Limited iterations for 4-minute runs
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0
# learning rate decay - faster for short runs
decay_lr = True
warmup_iters = 100 # Shorter warmup
lr_decay_iters = 1000
min_lr = 1e-4
# DDP settings
backend = 'nccl'
# system
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16'
compile = True
# HF specific
hf_repo_id = "mlopez6132/nano-coder-free" # Free tier repo
push_to_hub = True
# Time tracking for 4-minute limit
start_time = time.time()
MAX_TRAINING_TIME = 3.5 * 60 # 3.5 minutes to be safe
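# 3.5 min = 210 s; the ~30 s margin under the 4-minute cap is budget for model
# compilation, the final checkpoint save, and the HF upload (an assumption about
# where the slack goes, not a measured figure)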
# -----------------------------------------------------------------------------
config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys}
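# configurator.py (nanoGPT's) applies overrides from --key=value flags or from a
# config file passed as the first positional argument, e.g. (hypothetical file):
#   python hf_free_training.py config/train_free.py --learning_rate=6e-4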
# -----------------------------------------------------------------------------
# HF setup
if push_to_hub:
    # Check if HF_TOKEN environment variable is set
    if os.environ.get('HF_TOKEN'):
        login(token=os.environ.get('HF_TOKEN'))
    else:
        # Try to login without a token (will use cached credentials)
        try:
            login()
        except Exception as e:
            print(f"Warning: Could not login to HF Hub: {e}")
            print("Continuing without HF Hub upload...")
            push_to_hub = False
if push_to_hub:
    api = HfApi()
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get('RANK', -1)) != -1
if ddp:
    init_process_group(backend=backend)
    ddp_rank = int(os.environ['RANK'])
    ddp_local_rank = int(os.environ['LOCAL_RANK'])
    ddp_world_size = int(os.environ['WORLD_SIZE'])
    device = f'cuda:{ddp_local_rank}'
    torch.cuda.set_device(device)
    master_process = ddp_rank == 0
    seed_offset = ddp_rank
    assert gradient_accumulation_steps % ddp_world_size == 0
    gradient_accumulation_steps //= ddp_world_size
else:
    master_process = True
    seed_offset = 0
    ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size
print(f"tokens per iteration will be: {tokens_per_iter:,}")
print(f"FREE H200 TRAINING - MAX TIME: {MAX_TRAINING_TIME/60:.1f} minutes")
if master_process:
    os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
device_type = 'cuda' if 'cuda' in device else 'cpu'
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
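# note: under bfloat16 autocast the GradScaler constructed below is disabled
# (loss scaling is only needed for float16), so scaler.scale()/step() reduce to
# a plain backward()/optimizer.step()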
# data loader
data_dir = os.path.join('data', dataset)
def get_batch(split):
    # recreate np.memmap every batch to avoid a memory leak
    if split == 'train':
        data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
    else:
        data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
    y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
    if device_type == 'cuda':
        # pin x,y so they can be moved to the GPU asynchronously (non_blocking=True)
        x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
    else:
        x, y = x.to(device), y.to(device)
    return x, y
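# sanity check (sketch): x and y are int64 tensors of shape (batch_size, block_size),
# with y equal to x shifted one token to the right, e.g.:
#   xb, yb = get_batch('val')
#   assert xb.shape == (batch_size, block_size) and (xb[:, 1:] == yb[:, :-1]).all()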
# init these up here, can override if init_from='resume'
iter_num = 0
best_val_loss = 1e9
# attempt to derive vocab_size from the dataset
meta_path = os.path.join(data_dir, 'meta.pkl')
meta_vocab_size = None
if os.path.exists(meta_path):
    with open(meta_path, 'rb') as f:
        meta = pickle.load(f)
    meta_vocab_size = meta['vocab_size']
    print(f"found vocab_size = {meta_vocab_size} (inside {meta_path})")
# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size,
                  bias=bias, vocab_size=None, dropout=dropout)
if init_from == 'scratch':
    print("Initializing a new nano-coder model from scratch (FREE TIER)")
    if meta_vocab_size is None:
        print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
    model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
    gptconf = GPTConfig(**model_args)
    model = GPT(gptconf)
elif init_from == 'resume':
    print(f"Resuming training from {out_dir}")
    ckpt_path = os.path.join(out_dir, 'ckpt.pt')
    checkpoint = torch.load(ckpt_path, map_location=device)
    checkpoint_model_args = checkpoint['model_args']
    # force these config attributes to match the checkpoint, otherwise training can't resume
    for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
        model_args[k] = checkpoint_model_args[k]
    gptconf = GPTConfig(**model_args)
    model = GPT(gptconf)
    state_dict = checkpoint['model']
    # fix state dict keys: torch.compile prefixes them with '_orig_mod.'
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
    iter_num = checkpoint['iter_num']
    best_val_loss = checkpoint['best_val_loss']
elif init_from.startswith('gpt2'):
    print(f"Initializing from OpenAI GPT-2 weights: {init_from}")
    override_args = dict(dropout=dropout)
    model = GPT.from_pretrained(init_from, override_args)
    # read back the created config params so they are stored in checkpoints correctly
    for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
        model_args[k] = getattr(model.config, k)
# crop down the model block size if desired, using model surgery
if block_size < model.config.block_size:
    model.crop_block_size(block_size)
    model_args['block_size'] = block_size # so the checkpoint will have the right value
model.to(device)
# initialize a GradScaler; if enabled=False the scaler is a no-op
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
# optimizer
optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
if init_from == 'resume':
    optimizer.load_state_dict(checkpoint['optimizer'])
checkpoint = None # free up memory
# compile the model
if compile:
    print("compiling the model... (takes a ~minute)")
    unoptimized_model = model
    model = torch.compile(model)
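# note: the first optimized step pays the compilation cost, which the print above
# puts at ~a minute — a large slice of the 3.5-minute budget. Overriding with
# --compile=False may be the safer choice for a single very short run (a
# trade-off, not measured here).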
# wrap model into DDP container
if ddp:
    model = DDP(model, device_ids=[ddp_local_rank])
# helps estimate an arbitrarily accurate loss over either split using many batches
@torch.no_grad()
def estimate_loss():
    out = {}
    model.eval()
    for split in ['train', 'val']:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split)
            with ctx:
                logits, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean()
    model.train()
    return out
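# with eval_iters=10, each call averages 10 batches per split, i.e. about
# 10 * 64 * 512 ≈ 327K tokens per split — a noisy but cheap estimate suited to
# the tight time budget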
# learning rate decay scheduler (cosine with warmup)
def get_lr(it):
    # 1) linear warmup for warmup_iters steps
    if it < warmup_iters:
        return learning_rate * (it + 1) / (warmup_iters + 1)
    # 2) if it > lr_decay_iters, return the minimum learning rate
    if it > lr_decay_iters:
        return min_lr
    # 3) in between, cosine decay down to the minimum learning rate
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
    return min_lr + coeff * (learning_rate - min_lr)
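# worked example with the defaults (warmup_iters=100, lr_decay_iters=1000,
# learning_rate=1e-3, min_lr=1e-4):
#   get_lr(0)    ≈ 1e-3 * 1/101 ≈ 9.9e-6          (start of warmup)
#   get_lr(550)  = 1e-4 + 0.5*(1e-3 - 1e-4) = 5.5e-4  (cosine midpoint)
#   get_lr(1000) = 1e-4                            (fully decayed to min_lr)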
# logging
if wandb_log and master_process:
    wandb.init(project=wandb_project, name=wandb_run_name, config=config)
# HF checkpoint upload function
def upload_checkpoint_to_hf(checkpoint_path, iter_num):
    if push_to_hub and master_process:
        try:
            # Create a unique filename
            filename = f"checkpoint_iter_{iter_num}.pt"
            file_path = os.path.join(out_dir, filename)
            # Copy checkpoint with new name
            import shutil
            shutil.copy2(checkpoint_path, file_path)
            # Upload to HF
            api.upload_file(
                path_or_fileobj=file_path,
                path_in_repo=filename,
                repo_id=hf_repo_id,
                repo_type="model",
            )
            print(f"Uploaded checkpoint to HF: {filename}")
            # Clean up local copy
            os.remove(file_path)
        except Exception as e:
            print(f"Failed to upload checkpoint: {e}")
# training loop
print("Starting FREE H200 nano-coder training...")
X, Y = get_batch('train')
t0 = time.time()
local_iter_num = 0
raw_model = model.module if ddp else model
running_mfu = -1.0
while True:
    # check the wall-clock time limit for the free tier
    elapsed_time = time.time() - start_time
    if elapsed_time > MAX_TRAINING_TIME:
        print(f"\n⏰ TIME LIMIT REACHED! Training stopped after {elapsed_time/60:.1f} minutes")
        break

    # determine and set the learning rate for this iteration
    lr = get_lr(iter_num) if decay_lr else learning_rate
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    # evaluate the loss on train/val sets and write checkpoints
    if iter_num % eval_interval == 0 and master_process:
        losses = estimate_loss()
        remaining_time = MAX_TRAINING_TIME - elapsed_time
        print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}, time left: {remaining_time/60:.1f}min")
        if wandb_log:
            wandb.log({
                "iter": iter_num,
                "train/loss": losses['train'],
                "val/loss": losses['val'],
                "lr": lr,
                "mfu": running_mfu*100,
                "elapsed_time": elapsed_time,
                "remaining_time": remaining_time,
            })
        if losses['val'] < best_val_loss or always_save_checkpoint:
            best_val_loss = losses['val']
            if iter_num > 0:
                checkpoint = {
                    'model': raw_model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'model_args': model_args,
                    'iter_num': iter_num,
                    'best_val_loss': best_val_loss,
                    'config': config,
                }
                checkpoint_path = os.path.join(out_dir, 'ckpt.pt')
                print(f"saving checkpoint to {out_dir}")
                torch.save(checkpoint, checkpoint_path)
                # upload to HF every 200 iterations (frequent for short runs)
                if iter_num % 200 == 0:
                    upload_checkpoint_to_hf(checkpoint_path, iter_num)
    if iter_num == 0 and eval_only:
        break

    # forward backward update, with gradient accumulation to simulate a larger batch size
    for micro_step in range(gradient_accumulation_steps):
        if ddp:
            # in DDP, only sync gradients on the last micro-step
            model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
        with ctx:
            logits, loss = model(X, Y)
            loss = loss / gradient_accumulation_steps # scale the loss to account for gradient accumulation
        # immediately async prefetch the next batch while the model runs the forward pass on the GPU
        X, Y = get_batch('train')
        scaler.scale(loss).backward()
    # clip the gradient
    if grad_clip != 0.0:
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
    # step the optimizer and scaler
    scaler.step(optimizer)
    scaler.update()
    # flush the gradients as soon as possible, no need for this memory anymore
    optimizer.zero_grad(set_to_none=True)

    # timing and logging
    t1 = time.time()
    dt = t1 - t0
    t0 = t1
    if iter_num % log_interval == 0 and master_process:
        # get loss as a float (note: this is a CPU-GPU sync point)
        lossf = loss.item() * gradient_accumulation_steps
        if local_iter_num >= 5: # let the training loop settle a bit before estimating MFU
            mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
            running_mfu = mfu if running_mfu == -1.0 else 0.9*running_mfu + 0.1*mfu
        remaining_time = MAX_TRAINING_TIME - elapsed_time
        print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms, mfu {running_mfu*100:.2f}%, remaining: {remaining_time/60:.1f}min")
    iter_num += 1
    local_iter_num += 1

    # termination conditions
    if iter_num > max_iters:
        break
if ddp:
    destroy_process_group()
# Final upload
if push_to_hub and master_process:
    upload_checkpoint_to_hf(os.path.join(out_dir, 'ckpt.pt'), 'final')
total_time = time.time() - start_time
print(f"\n🎉 FREE H200 TRAINING COMPLETED!")
print(f"Total training time: {total_time/60:.1f} minutes")
print(f"Total iterations: {iter_num}")
print(f"Final validation loss: {best_val_loss:.4f}")
print(f"Model saved to: {out_dir}")