#!/bin/bash
#
# BitTransformerLM OPTIMIZED Massive Scale Training Launcher
# ==========================================================
#
# Launches 680M parameter BitTransformerLM with ALL optimizations enabled!
# Uses DataParallel for reliable multi-GPU training.
#
set -e # Exit on any error
echo "πŸš€ BITTRANSFORMERLM OPTIMIZED MASSIVE SCALE TRAINING"
echo "====================================================="
echo "Target: 680 MILLION parameters (CONFIRMED!)"
echo "Hardware: Multi-GPU with DataParallel"
echo "Dataset: WikiText-103 with bit-level encoding"
echo "Optimizations: ALL ENABLED!"
echo ""
# Set environment variables for optimal performance
export CUDA_VISIBLE_DEVICES=0,1,2,3                        # expose GPUs 0-3 to DataParallel
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True    # reduce CUDA allocator fragmentation
export OMP_NUM_THREADS=12                                  # cap CPU threads for data loading/ops
# Set HuggingFace token
export HF_TOKEN="${HF_TOKEN:-your-token-here}"
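# Optional sanity check (an addition, not part of the original launcher): warn if the token above
# is still the placeholder, since any Hugging Face Hub access during training would fail without it.
if [ "$HF_TOKEN" = "your-token-here" ]; then
    echo "⚠️  HF_TOKEN is not set -- Hub access may fail."
fi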
# Change to BitTransformerLM directory
cd /data/BitTransformerLM/BitTransformerLM
# Create checkpoint directory
mkdir -p /data/checkpoints
echo "πŸ” Hardware Check:"
python -c "
import torch
print(f'CUDA Available: {torch.cuda.is_available()}')
print(f'GPU Count: {torch.cuda.device_count()}')
for i in range(torch.cuda.device_count()):
props = torch.cuda.get_device_properties(i)
print(f' GPU {i}: {props.name} ({props.total_memory / 1024**3:.1f}GB)')
"
echo ""
echo "βš™οΈ OPTIMIZATIONS ENABLED:"
echo " βœ… Reversible Layers (50% memory savings)"
echo " βœ… Gradient Checkpointing"
echo " βœ… Mixed Precision (FP16)"
echo " βœ… Memory-Mapped Dataset Loading"
echo " βœ… Safety Telemetry (K, C, S metrics)"
echo " βœ… Bit-Native Processing"
echo " βœ… DataParallel Multi-GPU"
echo ""
echo "πŸ“Š Training Configuration:"
echo " β€’ Parameters: 679,962,626 (680M)"
echo " β€’ Architecture: d_model=1536, layers=24, heads=24"
echo " β€’ Batch Size: 2 per GPU"
echo " β€’ Gradient Accumulation: 16 steps"
echo " β€’ Effective Batch Size: 128"
echo " β€’ Learning Rate: 3e-4 with OneCycle"
echo " β€’ Dataset: WikiText-103 (2000 training samples)"
echo ""
echo "🎯 Starting optimized training..."
echo " This version should train successfully!"
echo ""
# Launch optimized training
python massive_scale_simple.py
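# Optional variant (an assumption, not part of the original launcher): tee the training output
# into a timestamped log next to the checkpoints. With a pipeline, enable `set -o pipefail` so
# the script still exits on a training failure:
#   set -o pipefail
#   python massive_scale_simple.py 2>&1 | tee /data/checkpoints/train_$(date +%Y%m%d_%H%M%S).log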
echo ""
echo "🏁 Training completed successfully!"
echo "Check /data/checkpoints/ for saved models"