p1atdev committed
Commit 7a4e2c1 · verified · Parent: 2ddeb77

Update README.md

Files changed (1)
  1. README.md +35 -35
README.md CHANGED
@@ -30,6 +30,41 @@ Demo: [🤗 Space with ZERO](https://huggingface.co/spaces/p1atdev/danbooru-tags
 
  ## Usage
 
+ ## Using 🤗Transformers
+
+ ```py
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ MODEL_NAME = "p1atdev/dart-v2-moe-sft"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.bfloat16)
+
+ prompt = (
+     f"<|bos|>"
+     f"<copyright>vocaloid</copyright>"
+     f"<character>hatsune miku</character>"
+     f"<|rating:general|><|aspect_ratio:tall|><|length:long|>"
+     f"<general>1girl, cat ears<|identity:none|><|input_end|>"
+ )
+ inputs = tokenizer(prompt, return_tensors="pt").input_ids
+
+ with torch.no_grad():
+     outputs = model.generate(
+         inputs,
+         do_sample=True,
+         temperature=1.0,
+         top_p=1.0,
+         top_k=100,
+         max_new_tokens=128,
+         num_beams=1,
+     )
+
+ print(", ".join([tag for tag in tokenizer.batch_decode(outputs[0], skip_special_tokens=True) if tag.strip() != ""]))
+ # vocaloid, hatsune miku, 1girl, cat ears, closed mouth, detached sleeves, dress, expressionless, from behind, full body, green theme, hair ornament, hair ribbon, headphones, high heels, holding, holding microphone, long hair, microphone, monochrome, necktie, ribbon, short dress, shoulder tattoo, simple background, sleeveless, sleeveless dress, spot color, standing, tattoo, thighhighs, twintails, very long hair, white background
+ ```
+
  ### Using 📦`dartrs` library
 
  > [!WARNING]
@@ -80,41 +115,6 @@ print(f"Time taken: {end - start:.2f}s")
  # Time taken: 0.26s
  ```
 
- ## Using 🤗Transformers
-
- ```py
- import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- MODEL_NAME = "p1atdev/dart-v2-moe-sft"
-
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.bfloat16)
-
- prompt = (
-     f"<|bos|>"
-     f"<copyright>vocaloid</copyright>"
-     f"<character>hatsune miku</character>"
-     f"<|rating:general|><|aspect_ratio:tall|><|length:long|>"
-     f"<general>1girl, cat ears<|identity:none|><|input_end|>"
- )
- inputs = tokenizer(prompt, return_tensors="pt").input_ids
-
- with torch.no_grad():
-     outputs = model.generate(
-         inputs,
-         do_sample=True,
-         temperature=1.0,
-         top_p=1.0,
-         top_k=100,
-         max_new_tokens=128,
-         num_beams=1,
-     )
-
- print(", ".join([tag for tag in tokenizer.batch_decode(outputs[0], skip_special_tokens=True) if tag.strip() != ""]))
- # vocaloid, hatsune miku, 1girl, cat ears, closed mouth, detached sleeves, dress, expressionless, from behind, full body, green theme, hair ornament, hair ribbon, headphones, high heels, holding, holding microphone, long hair, microphone, monochrome, necktie, ribbon, short dress, shoulder tattoo, simple background, sleeveless, sleeveless dress, spot color, standing, tattoo, thighhighs, twintails, very long hair, white background
- ```
-
  ## Prompt Format
 
  ```py