upup-ashton-wang committed on
Commit f54dee8 (verified)
Parent: effe6e5

Update README.md

Files changed (1):
  README.md (+2 -2)
README.md CHANGED
@@ -33,13 +33,13 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("metagene-ai/METAGENE-1")
-model = AutoModelForCausalLM.from_pretrained("metagene-ai/METAGENE-1", torch_dtype=torch.bfloat16)
+model = AutoModelForCausalLM.from_pretrained("metagene-ai/METAGENE-1", torch_dtype=torch.bfloat16, device_map="auto")
 
 # Example input sequence
 input_sequence = "TCACCGTTCTACAATCCCAAGCTGGAGTCAAGCTCAACAGGGTCTTC"
 
 # Tokenize the input sequence and remove the [EOS] token for generation
-input_tokens = tokenizer.encode(input_sequence, return_tensors="pt", add_special_tokens=False)
+input_tokens = tokenizer.encode(input_sequence, return_tensors="pt", add_special_tokens=False).to(model.device)
 
 # Generate output from the model
 generated_tokens = model.generate(input_tokens, max_length=32)
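
The change places the model weights on available hardware (device_map="auto") and moves the tokenized input onto the same device (.to(model.device)), which avoids a CPU/GPU device-mismatch error inside generate. Below is a minimal sketch of the updated snippet run end to end, assuming torch, transformers, and accelerate (required for device_map="auto") are installed; the final decode line is an illustrative addition, not part of the README.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model; device_map="auto" places weights on GPU when one is available
tokenizer = AutoTokenizer.from_pretrained("metagene-ai/METAGENE-1")
model = AutoModelForCausalLM.from_pretrained(
    "metagene-ai/METAGENE-1", torch_dtype=torch.bfloat16, device_map="auto"
)

# Example input sequence
input_sequence = "TCACCGTTCTACAATCCCAAGCTGGAGTCAAGCTCAACAGGGTCTTC"

# Tokenize without special tokens and move the ids to the model's device
input_tokens = tokenizer.encode(
    input_sequence, return_tensors="pt", add_special_tokens=False
).to(model.device)

# Generate a continuation of the metagenomic sequence
generated_tokens = model.generate(input_tokens, max_length=32)

# Decode back to nucleotide text (added here for illustration only)
print(tokenizer.decode(generated_tokens[0], skip_special_tokens=True))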