Update README.md
README.md CHANGED
@@ -58,6 +58,7 @@ Using [llama.cpp quantize cae9fb4](https://github.com/ggerganov/llama.cpp/commit
| - | Q4_K | TBC | TBC | - |
| [flux1-dev-Q4_K_S.gguf](https://huggingface.co/Eviation/flux-imatrix/blob/main/experimental-from-q8/flux1-dev-Q4_K_S.gguf) | Q4_K_S | 6.79GB | TBC | - |
| [flux1-dev-Q4_K_M.gguf](https://huggingface.co/Eviation/flux-imatrix/blob/main/experimental-from-q8/flux1-dev-Q4_K_M.gguf) | Q4_K_M | 6.93GB | TBC | - |
+| [flux1-dev-Q4_1.gguf](https://huggingface.co/Eviation/flux-imatrix/blob/main/experimental-from-q8/flux1-dev-Q4_1.gguf) | Q4_1 | 7.53GB | TBC | - |
| - | Q5_K | TBC | TBC | - |
| - | Q5_K_S | TBC | TBC | - |
| - | Q5_K_M | TBC | TBC | - |
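For context, the hunk header above references llama.cpp's quantize tool (commit cae9fb4). The exact workflow for these files is not documented in this diff; as a rough sketch only, assuming the `experimental-from-q8` quants are requantized from a Q8_0 GGUF with an importance matrix, an invocation might look like the following. The input/output file names and the `imatrix.dat` path are illustrative placeholders, not taken from this repository.

```bash
# Hypothetical sketch: requantize a Q8_0 GGUF down to Q4_K_S using an imatrix.
# --allow-requantize and --imatrix are real llama-quantize options; paths are
# placeholders, and older builds name the binary `quantize` rather than
# `llama-quantize`. Flux-specific workflows may use patched tooling instead.
./llama-quantize --allow-requantize --imatrix imatrix.dat \
    flux1-dev-Q8_0.gguf flux1-dev-Q4_K_S.gguf Q4_K_S
```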