dhead committed on
Commit 7be208c · verified · 1 parent: e64b265

Upload 3 files

Files changed (2)
  1. README.md +1 -1
  2. app.py +2 -2
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: WaiNSFWIllustrious V110
+title: WaiNSFWIllustrious V130 Space
 emoji: 🖼
 colorFrom: purple
 colorTo: red
app.py CHANGED
@@ -15,7 +15,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Make sure to use torch.float16 consistently throughout the pipeline
 pipe = StableDiffusionXLPipeline.from_pretrained(
-    "dhead/waiNSFWIllustrious_v120",
+    "dhead/waiNSFWIllustrious_v130",
     torch_dtype=torch.float16,
     variant="fp16",  # Explicitly use fp16 variant
     use_safetensors=True  # Use safetensors if available
@@ -33,7 +33,7 @@ pipe.unet.to(torch.float16)
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216
 
-@spaces.GPU(duration=10)
+@spaces.GPU()
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     # Check and truncate prompt if too long (CLIP can only handle 77 tokens)
     if len(prompt.split()) > 60:  # Rough estimate to avoid exceeding token limit
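
Net effect of the app.py hunks: the Space now loads the v130 checkpoint, and dropping the explicit duration=10 from @spaces.GPU lets each call fall back to ZeroGPU's default time budget (60 s at the time of writing) rather than a 10-second cap. Below is a minimal sketch of the resulting setup, assuming the surrounding app.py matches the context in the hunk headers; the simplified infer() signature, generation arguments, and example call are illustrative, and the diff's prompt-truncation logic is elided.

import torch
import spaces
from diffusers import StableDiffusionXLPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Same loading pattern as the hunk at line 15, pointing at the new checkpoint.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "dhead/waiNSFWIllustrious_v130",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to(device)

# No duration argument: ZeroGPU applies its default allocation (assumed 60 s)
# instead of the 10 s cap the old decorator imposed.
@spaces.GPU()
def infer(prompt, num_inference_steps=28, guidance_scale=7.0):
    # Signature simplified relative to the real infer(); seed/size handling elided.
    return pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    ).images[0]

# Hypothetical usage:
# image = infer("1girl, watercolor, night sky")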