Update app.py
app.py CHANGED
@@ -7,6 +7,17 @@
 
 import spaces
 import os
+
+os.putenv('PYTORCH_NVML_BASED_CUDA_CHECK','1')
+os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
+alloc_conf_parts = [
+    'expandable_segments:True',
+    'pinned_use_background_threads:True' # Specific to pinned memory.
+]
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join(alloc_conf_parts)
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
+os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
+
 import random
 import uuid
 import re
@@ -40,13 +51,10 @@ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
-
-
+torch.backends.cuda.preferred_blas_library="cublas"
+torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
 
-os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
-os.environ["SAFETENSORS_FAST_GPU"] = "1"
-
 FTP_HOST = "1ink.us"
 FTP_USER = "ford442"
 FTP_PASS = os.getenv("FTP_PASS")
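
For context, a minimal sketch of how this configuration is typically applied, assuming a recent PyTorch (2.1 or later); it is illustrative only and not part of the commit. Two caveats: assigning through os.environ also updates the current process's view of the environment (os.putenv alone does not refresh os.environ), and in current PyTorch releases preferred_blas_library / preferred_linalg_library are exposed as callable setters rather than plain attributes.

import os

# Allocator and hub settings must be in place before torch initializes CUDA,
# so they are written before the torch import.
os.environ['PYTORCH_NVML_BASED_CUDA_CHECK'] = '1'
os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join([
    'expandable_segments:True',            # let the caching allocator grow segments to reduce fragmentation
    'pinned_use_background_threads:True',  # applies to pinned (page-locked) host memory
])
os.environ['SAFETENSORS_FAST_GPU'] = '1'
os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'

import torch

# Backend preferences via the setter functions (assumption: PyTorch >= 2.1).
torch.backends.cuda.preferred_blas_library('cublas')
torch.backends.cuda.preferred_linalg_library('cusolver')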