Commit · 4f8f050
Parent(s): a1fa8d5

add new turbo mode
Changed files:

- src/app/engine/presets.ts +13 -5
- src/app/engine/render.ts +28 -2
- src/app/interface/panel/index.tsx +40 -11
- src/app/interface/settings-dialog/defaultSettings.ts +1 -0
- src/app/interface/settings-dialog/getSettings.ts +2 -0
- src/app/interface/settings-dialog/index.tsx +22 -1
- src/app/interface/settings-dialog/localStorageKeys.ts +1 -0
- src/app/main.tsx +14 -8
- src/app/queries/getStory.ts +5 -4
- src/lib/getValidBoolean.ts +9 -0
- src/types.ts +9 -0
src/app/engine/presets.ts
CHANGED
@@ -57,7 +57,7 @@ export const presets: Record<string, Preset> = {
     llmPrompt: "japanese manga",
     imagePrompt: (prompt: string) => [
       `grayscale`,
-      `
+      `detailed drawing`,
       `japanese manga`,
       prompt,
       // "single panel",
@@ -90,6 +90,7 @@ export const presets: Record<string, Preset> = {
       "ancient japanese painting",
       "intricate",
       "detailed",
+      "detailed painting"
       // "drawing"
     ],
     negativePrompt: () => [
@@ -116,6 +117,7 @@ export const presets: Record<string, Preset> = {
       "franco-belgian comic",
       `franco-belgian color comic about ${prompt}`,
       "comic album",
+      "detailed drawing"
       // "color drawing"
     ],
     negativePrompt: () => [
@@ -139,6 +141,7 @@ export const presets: Record<string, Preset> = {
     imagePrompt: (prompt: string) => [
       "digital color comicbook style",
       `modern american comic about ${prompt}`,
+      "detailed drawing"
       //"single panel",
       // "2010s",
       // "digital print",
@@ -199,6 +202,7 @@ export const presets: Record<string, Preset> = {
       "1950",
       "50s",
       `vintage american color comic about ${prompt}`,
+      "detailed drawing"
       // "single panel",
       // "comicbook style",
       // "color comicbook",
@@ -261,6 +265,7 @@ export const presets: Record<string, Preset> = {
       "color pulp comic panel",
       "1940",
       `${prompt}`,
+      "detailed drawing"
       // "single panel",
       // "comic album"
     ],
@@ -287,12 +292,11 @@ export const presets: Record<string, Preset> = {
       `color comic panel`,
       "style of Moebius",
       `${prompt}`,
-      "
+      "detailed drawing",
       "french comic panel",
       "franco-belgian style",
       "bande dessinée",
       "single panel",
-      "intricate"
       // "comic album"
     ],
     negativePrompt: () => [
@@ -406,6 +410,8 @@ export const presets: Record<string, Preset> = {
       `funny`,
       `Unreal engine`,
       `${prompt}`,
+      `crisp`,
+      `sharp`
     ],
     negativePrompt: () => [
       "manga",
@@ -428,8 +434,9 @@ export const presets: Record<string, Preset> = {
       `patchwork`,
       `style of Gustav Klimt`,
       `Gustav Klimt painting`,
-      `intricate details`,
       `${prompt}`,
+      `detailed painting`,
+      `intricate details`
     ],
     negativePrompt: () => [
       "manga",
@@ -451,9 +458,9 @@ export const presets: Record<string, Preset> = {
       `medieval illuminated manuscript`,
       `illuminated manuscript of`,
       `medieval`,
-      `intricate details`,
       // `medieval color engraving`,
       `${prompt}`,
+      `intricate details`,
       // `medieval`
     ],
     negativePrompt: () => [
@@ -627,6 +634,7 @@ export const presets: Record<string, Preset> = {
       `instagram`,
       `photoshoot`,
       `${prompt}`,
+      `crisp details`
     ],
     negativePrompt: () => [
       "manga",
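Each preset's `imagePrompt` returns an array of short style fragments; this commit mostly appends quality boosters such as "detailed drawing" or `crisp` to them. The fragments are later trimmed, filtered, and joined into the final SDXL prompt (see src/app/main.tsx below). A minimal sketch of that composition, with a simplified `Preset` shape:

```ts
// Sketch: how a preset's imagePrompt fragments become one SDXL prompt
// string. The Preset type is simplified here; the real one lives in
// src/app/engine/presets.ts.
type Preset = { imagePrompt: (prompt: string) => string[] }

const japaneseManga: Preset = {
  imagePrompt: (prompt: string) => [
    `grayscale`,
    `detailed drawing`, // fragment added by this commit
    `japanese manga`,
    prompt,
  ],
}

const finalPrompt = japaneseManga
  .imagePrompt("a cat exploring a temple")
  .map(x => x.trim())   // same pattern as main.tsx
  .filter(x => x)       // drop empty fragments
  .join(", ")

console.log(finalPrompt)
// grayscale, detailed drawing, japanese manga, a cat exploring a temple
```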
src/app/engine/render.ts
CHANGED
@@ -44,7 +44,6 @@ export async function newRender({
   settings: Settings
 }) {
   // throw new Error("Planned maintenance")
-
  if (!prompt) {
    const error = `cannot call the rendering API without a prompt, aborting..`
    console.error(error)
@@ -81,6 +80,8 @@ export async function newRender({
 
  const placeholder = "<USE YOUR OWN TOKEN>"
 
+  // console.log("settings:", JSON.stringify(settings, null, 2))
+
  if (
    settings.renderingModelVendor === "OPENAI" &&
    settings.openaiApiKey &&
@@ -170,7 +171,7 @@ export async function newRender({
 
    const response = (await res.json()) as { data: { url: string }[] }
 
-    console.log("response:", response)
+    // console.log("response:", response)
    return {
      renderId: uuidv4(),
      status: "completed",
@@ -352,6 +353,29 @@ export async function newRender({
      segments: []
    } as RenderedScene
  } else {
+    console.log("sending:", {
+      prompt,
+      // negativePrompt, unused for now
+      nbFrames: 1,
+      nbSteps: nbInferenceSteps, // 20 = fast, 30 = better, 50 = best
+      actionnables: [], // ["text block"],
+      segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
+      width,
+      height,
+
+      // no need to upscale right now as we generate tiny panels
+      // maybe later we can provide an "export" button to PDF
+      // unfortunately there are too many requests for upscaling,
+      // the server is always down
+      upscalingFactor: 1, // 2,
+
+      turbo: settings.renderingUseTurbo,
+
+      // analyzing doesn't work yet, it seems..
+      analyze: false, // analyze: true,
+
+      cache: "ignore"
+    })
    const res = await fetch(`${videochainApiUrl}${videochainApiUrl.endsWith("/") ? "" : "/"}render`, {
      method: "POST",
      headers: {
@@ -375,6 +399,8 @@ export async function newRender({
      // the server is always down
      upscalingFactor: 1, // 2,
 
+      turbo: settings.renderingUseTurbo,
+
      // analyzing doesn't work yet, it seems..
      analyze: false, // analyze: true,
 
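The functional change in this file is the `turbo: settings.renderingUseTurbo` field added to the body of the render request (the rest is commented-out logging). A minimal sketch of the resulting call, assuming the field names shown in the diff; the URL and the surrounding types are placeholders:

```ts
// Sketch: the turbo flag travels from Settings into the POST body of
// the render request. videochainApiUrl is a placeholder here.
async function requestRender(
  prompt: string,
  settings: { renderingUseTurbo: boolean },
) {
  const videochainApiUrl = "https://example.com/api" // placeholder
  const res = await fetch(`${videochainApiUrl}/render`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      prompt,
      nbFrames: 1,        // a still image, not a video
      nbSteps: 25,        // fewer inference steps = faster, lower quality
      width: 512,
      height: 512,
      upscalingFactor: 1, // no upscaling for tiny panels
      turbo: settings.renderingUseTurbo, // the new flag
      analyze: false,
      cache: "ignore",
    }),
  })
  return res.json()
}
```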
src/app/interface/panel/index.tsx
CHANGED
@@ -140,15 +140,34 @@ export function Panel({
        withCache: revision === 0,
        settings: getSettings(),
      })
+      if (!newRendered.status || newRendered.status === "error") {
+        throw new Error("invalid status")
+      }
    } catch (err) {
      // "Failed to load the panel! Don't worry, we are retrying..")
-
-
-
-
-
-
-
+
+      try {
+        newRendered = await newRender({
+          prompt: cacheInvalidationHack + " " + prompt,
+          width,
+          height,
+          withCache,
+          settings: getSettings(),
+        })
+        if (!newRendered.status || newRendered.status === "error") {
+          throw new Error("invalid status")
+        }
+      } catch (err2) {
+        newRendered = {
+          renderId: "",
+          status: "error",
+          assetUrl: "",
+          alt: "",
+          maskUrl: "",
+          error: `${err2 || "unknown error"}`,
+          segments: []
+        }
+      }
    }
 
    if (newRendered) {
@@ -157,17 +176,22 @@ export function Panel({
      if (newRendered.status === "completed") {
        setGeneratingImages(panelId, false)
        addToUpscaleQueue(panelId, newRendered)
+      } else if (!newRendered.status || newRendered.status === "error") {
+        setGeneratingImages(panelId, false)
+      } else {
+        // still loading
      }
 
-
+
    } else {
+      //
      setRendered(panelId, {
        renderId: "",
-        status: "
+        status: "error",
        assetUrl: "",
        alt: "",
        maskUrl: "",
-        error: "",
+        error: "empty newRendered",
        segments: []
      })
      setGeneratingImages(panelId, false)
@@ -198,9 +222,10 @@ export function Panel({
 
      if (newRendered.status === "pending") {
        timeoutRef.current = setTimeout(checkStatus, delay)
-      } else if (newRendered.status === "error" ||
+      } else if (!newRendered.status || newRendered.status === "error" ||
        (newRendered.status === "completed" && !newRendered.assetUrl?.length)) {
        try {
+          // we try only once
          const newAttempt = await newRender({
            prompt,
            width,
@@ -208,6 +233,9 @@ export function Panel({
            withCache: false,
            settings: getSettings(),
          })
+          if (!newAttempt.status || newAttempt.status === "error") {
+            throw new Error("invalid status")
+          }
          setRendered(panelId, newAttempt)
        } catch (err) {
          console.error("yeah sorry, something is wrong.. aborting", err)
@@ -217,6 +245,7 @@ export function Panel({
        console.log("panel finished!")
        setGeneratingImages(panelId, false)
        addToUpscaleQueue(panelId, newRendered)
+
      }
    } catch (err) {
      console.error(err)
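The new retry path prepends `cacheInvalidationHack` to the prompt, so the second attempt uses a slightly different input and cannot be short-circuited by a cached failure for the original prompt. The pattern, condensed into a hypothetical standalone helper:

```ts
// Condensed sketch of the retry-with-cache-busting pattern above.
// render() and the prefix generation are illustrative stand-ins.
declare function render(prompt: string): Promise<string>

async function renderWithRetry(prompt: string): Promise<string> {
  try {
    return await render(prompt)
  } catch {
    // a varying prefix makes the retried prompt unique, so a cached
    // failed result for the original prompt cannot be returned again
    const cacheInvalidationHack = Math.random().toString(36).slice(2, 6)
    return await render(`${cacheInvalidationHack} ${prompt}`)
  }
}
```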
src/app/interface/settings-dialog/defaultSettings.ts
CHANGED
@@ -2,6 +2,7 @@ import { RenderingModelVendor, Settings } from "@/types"
 
 export const defaultSettings: Settings = {
   renderingModelVendor: "SERVER" as RenderingModelVendor,
+  renderingUseTurbo: false,
   huggingfaceApiKey: "",
   huggingfaceInferenceApiModel: "stabilityai/stable-diffusion-xl-base-1.0",
   huggingfaceInferenceApiModelTrigger: "",
src/app/interface/settings-dialog/getSettings.ts
CHANGED
@@ -3,11 +3,13 @@ import { RenderingModelVendor, Settings } from "@/types"
 import { getValidString } from "@/lib/getValidString"
 import { localStorageKeys } from "./localStorageKeys"
 import { defaultSettings } from "./defaultSettings"
+import { getValidBoolean } from "@/lib/getValidBoolean"
 
 export function getSettings(): Settings {
   try {
     return {
       renderingModelVendor: getValidString(localStorage?.getItem?.(localStorageKeys.renderingModelVendor), defaultSettings.renderingModelVendor) as RenderingModelVendor,
+      renderingUseTurbo: getValidBoolean(localStorage?.getItem?.(localStorageKeys.renderingUseTurbo), defaultSettings.renderingUseTurbo),
       huggingfaceApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceApiKey), defaultSettings.huggingfaceApiKey),
       huggingfaceInferenceApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceInferenceApiModel), defaultSettings.huggingfaceInferenceApiModel),
       huggingfaceInferenceApiModelTrigger: getValidString(localStorage?.getItem?.(localStorageKeys.huggingfaceInferenceApiModelTrigger), defaultSettings.huggingfaceInferenceApiModelTrigger),
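Every field in `getSettings()` follows the same validated-read pattern: read the raw localStorage value (always a string or null) and fall back to the default when it is missing or unusable. A minimal sketch of the pattern; `getValidString` here is a plausible simplification, not the repo's exact implementation:

```ts
// Sketch of the validated-read pattern used by getSettings().
function getValidString(something: unknown, defaultValue: string): string {
  return typeof something === "string" && something.length > 0
    ? something
    : defaultValue
}

function readSetting(key: string, defaultValue: string): string {
  try {
    // optional chaining because localStorage may be unavailable (e.g. SSR)
    return getValidString(localStorage?.getItem?.(key), defaultValue)
  } catch {
    return defaultValue
  }
}
```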
src/app/interface/settings-dialog/index.tsx
CHANGED
@@ -18,6 +18,8 @@ import { Label } from "./label"
 import { Field } from "./field"
 import { localStorageKeys } from "./localStorageKeys"
 import { defaultSettings } from "./defaultSettings"
+import { Switch } from "@/components/ui/switch"
+import { cn } from "@/lib/utils"
 
 export function SettingsDialog() {
   const [isOpen, setOpen] = useState(false)
@@ -25,6 +27,10 @@ export function SettingsDialog() {
     localStorageKeys.renderingModelVendor,
     defaultSettings.renderingModelVendor
   )
+  const [renderingUseTurbo, setRenderingUseTurbo] = useLocalStorage<boolean>(
+    localStorageKeys.renderingUseTurbo,
+    defaultSettings.renderingUseTurbo
+  )
   const [huggingfaceApiKey, setHuggingfaceApiKey] = useLocalStorage<string>(
     localStorageKeys.huggingfaceApiKey,
     defaultSettings.huggingfaceApiKey
@@ -71,7 +77,7 @@ export function SettingsDialog() {
        </div>
      </Button>
    </DialogTrigger>
-    <DialogContent className="w-full sm:max-w-[500px] md:max-w-[700px] overflow-y-auto h-[100vh] md:h-[80vh]">
+    <DialogContent className="w-full sm:max-w-[500px] md:max-w-[700px] overflow-y-auto h-max-[100vh] md:h-max-[80vh]">
      <DialogHeader>
        <DialogDescription className="w-full text-center text-lg font-bold text-stone-800">
          Custom Settings
@@ -103,6 +109,21 @@ export function SettingsDialog() {
      </Select>
    </Field>
 
+    {renderingModelVendor === "SERVER" && <>
+      <Field>
+        <Label>Quality over performance ratio:</Label>
+        <div className="flex flex-row space-x-2 text-zinc-500">
+          <Switch
+            checked={renderingUseTurbo}
+            onCheckedChange={setRenderingUseTurbo}
+          />
+          <span
+            onClick={() => setRenderingUseTurbo(!renderingUseTurbo)}
+            className={cn("cursor-pointer", { "text-zinc-800": renderingUseTurbo })}>Use a faster model, but with inferior quality of images (you are warned!).</span>
+        </div>
+      </Field>
+    </>}
+
    {renderingModelVendor === "HUGGINGFACE" && <>
      <Field>
        <Label>Hugging Face API Token (<a className="text-stone-600 underline" href="https://huggingface.co/subscribe/pro" target="_blank">PRO account</a> recommended for higher rate limit):</Label>
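In the new toggle's label, `cn` is a clsx/tailwind-merge style class helper (as in shadcn/ui): it concatenates class strings and applies the keys of an object argument only when their values are truthy, which is what darkens the label text while turbo is enabled:

```ts
// clsx-style behavior of cn as used above:
cn("cursor-pointer", { "text-zinc-800": true })  // "cursor-pointer text-zinc-800"
cn("cursor-pointer", { "text-zinc-800": false }) // "cursor-pointer"
```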
src/app/interface/settings-dialog/localStorageKeys.ts
CHANGED
@@ -2,6 +2,7 @@ import { Settings } from "@/types"
 
 export const localStorageKeys: Record<keyof Settings, string> = {
   renderingModelVendor: "CONF_RENDERING_MODEL_VENDOR",
+  renderingUseTurbo: "CONF_RENDERING_USE_TURBO",
   huggingfaceApiKey: "CONF_AUTH_HF_API_TOKEN",
   huggingfaceInferenceApiModel: "CONF_RENDERING_HF_INFERENCE_API_BASE_MODEL",
   huggingfaceInferenceApiModelTrigger: "CONF_RENDERING_HF_INFERENCE_API_BASE_MODEL_TRIGGER",
src/app/main.tsx
CHANGED
@@ -45,7 +45,7 @@ export default function Main() {
 
   let llmResponse: LLMResponse = []
 
-  const [stylePrompt, userStoryPrompt] = prompt.split("||")
+  const [stylePrompt, userStoryPrompt] = prompt.split("||").map(x => x.trim())
 
   try {
     llmResponse = await getStory({
@@ -57,7 +57,9 @@ export default function Main() {
      // + the LLM may reject some of the styles
      // stylePrompt ? `in the following context: ${stylePrompt}` : ''
 
-    ].filter(x => x).join(", "),
+    ].map(x => x.trim()).filter(x => x).join(", "),
+      nbTotalPanels
+    })
    console.log("LLM responded:", llmResponse)
 
  } catch (err) {
@@ -68,7 +70,11 @@ export default function Main() {
    for (let p = 0; p < nbTotalPanels; p++) {
      llmResponse.push({
        panel: p,
-        instructions:
+        instructions: [
+          stylePrompt,
+          userStoryPrompt,
+          `${".".repeat(p)}`,
+        ].map(x => x.trim()).filter(x => x).join(", "),
        caption: "(Sorry, LLM generation failed: using degraded mode)"
      })
    }
@@ -77,20 +83,20 @@ export default function Main() {
 
  // we have to limit the size of the prompt, otherwise the rest of the style won't be followed
 
-  let limitedStylePrompt = stylePrompt.slice(0, 77)
+  let limitedStylePrompt = stylePrompt.trim().slice(0, 77).trim()
  if (limitedStylePrompt.length !== stylePrompt.length) {
    console.log("Sorry folks, the style prompt was cut to:", limitedStylePrompt)
  }
 
  // new experimental prompt: let's drop the user prompt, and only use the style
-  const lightPanelPromptPrefix = preset.imagePrompt(limitedStylePrompt).filter(x => x).join(", ")
+  const lightPanelPromptPrefix = preset.imagePrompt(limitedStylePrompt).map(x => x.trim()).filter(x => x).join(", ")
 
  // this prompt will be used if the LLM generation failed
  const degradedPanelPromptPrefix = [
    ...preset.imagePrompt(limitedStylePrompt),
 
    // we re-inject the story, then
-    userStoryPrompt,
+    userStoryPrompt.trim(),
  ].filter(x => x).join(", ")
 
  const newPanels: string[] = []
@@ -98,7 +104,7 @@ export default function Main() {
  setWaitABitMore(true)
  console.log("Panel prompts for SDXL:")
  for (let p = 0; p < nbTotalPanels; p++) {
-    newCaptions.push(llmResponse[p]?.caption || "...")
+    newCaptions.push(llmResponse[p]?.caption.trim() || "...")
    const newPanel = [
 
      // what we do here is that ideally we give full control to the LLM for prompting,
@@ -108,7 +114,7 @@ export default function Main() {
      : degradedPanelPromptPrefix,
 
      llmResponse[p]?.instructions || ""
-    ].map(
+    ].map(x => x.trim()).filter(x => x).join(", ")
    newPanels.push(newPanel)
    console.log(newPanel)
  }
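In the degraded fallback, `".".repeat(p)` appends `p` dots to panel `p`'s instructions, so every panel gets a slightly different prompt even though the LLM failed and all panels share the same story text. For example:

```ts
// Degraded-mode instructions for the first three panels, assuming
// stylePrompt = "japanese manga" and userStoryPrompt = "a cat story":
for (let p = 0; p < 3; p++) {
  const instructions = [
    "japanese manga",
    "a cat story",
    `${".".repeat(p)}`, // "", ".", ".."
  ].map(x => x.trim()).filter(x => x).join(", ")
  console.log(instructions)
}
// japanese manga, a cat story
// japanese manga, a cat story, .
// japanese manga, a cat story, ..
```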
src/app/queries/getStory.ts
CHANGED
@@ -43,19 +43,20 @@ export const getStory = async ({
   let result = ""
 
   try {
+    // console.log(`calling predict(${query}, ${nbTotalPanels})`)
     result = `${await predict(query, nbTotalPanels) || ""}`.trim()
     if (!result.length) {
       throw new Error("empty result!")
     }
   } catch (err) {
-    console.log(`prediction of the story failed, trying again..`)
+    // console.log(`prediction of the story failed, trying again..`)
     try {
       result = `${await predict(query+".", nbTotalPanels) || ""}`.trim()
       if (!result.length) {
         throw new Error("empty result!")
       }
     } catch (err) {
-      console.error(`prediction of the story failed again
+      console.error(`prediction of the story failed again 💩`)
       throw new Error(`failed to generate the story ${err}`)
     }
   }
@@ -68,8 +69,8 @@ export const getStory = async ({
   try {
     llmResponse = dirtyLLMJsonParser(tmp)
   } catch (err) {
-    console.log(`failed to read LLM response: ${err}`)
-    console.log(`original response was:`, result)
+    // console.log(`failed to read LLM response: ${err}`)
+    // console.log(`original response was:`, result)
 
     // in case of failure here, it might be because the LLM hallucinated a completely different response,
     // such as markdown. There is no real solution.. but we can try a fallback:
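The retry calls `predict(query + ".", ...)`: the trailing dot perturbs the query just enough to avoid replaying a cached empty completion while leaving its meaning intact. The empty-result guard, condensed with `predict` as a stand-in:

```ts
// Condensed sketch of the guard above; predict() is a stand-in for
// the app's LLM call.
declare function predict(query: string, nbTotalPanels: number): Promise<string | undefined>

async function predictNonEmpty(query: string, nbTotalPanels: number): Promise<string> {
  // `${... || ""}` coerces undefined/null to "", so .trim() never throws
  const result = `${await predict(query, nbTotalPanels) || ""}`.trim()
  if (!result.length) {
    // an empty completion counts as a failure, which lets the caller
    // retry once with the perturbed query
    throw new Error("empty result!")
  }
  return result
}
```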
src/lib/getValidBoolean.ts
ADDED
@@ -0,0 +1,9 @@
+export const getValidBoolean = (something: any, defaultValue: boolean) => {
+  if (typeof something === "boolean") {
+    return something
+  }
+
+  const strValue = `${something || defaultValue}`.toLowerCase()
+
+  return strValue === "true" || strValue === "1" || strValue === "on"
+}
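localStorage stores everything as strings, so the boolean setting comes back as text; this helper normalizes "true"/"1"/"on" (case-insensitively) to `true`, with the default filling in for null/undefined. Expected behavior:

```ts
import { getValidBoolean } from "@/lib/getValidBoolean"

getValidBoolean(true, false)   // true  (already a boolean)
getValidBoolean("true", false) // true
getValidBoolean("TRUE", false) // true  (lowercased first)
getValidBoolean("1", false)    // true
getValidBoolean("on", false)   // true
getValidBoolean("false", true) // false
getValidBoolean(null, true)    // true  (falls back to the default)
getValidBoolean(null, false)   // false
```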
src/types.ts
CHANGED
@@ -39,6 +39,14 @@ export interface RenderRequest {
 
   projection: ProjectionMode
 
+  /**
+   * Use turbo mode
+   *
+   * At the time of writing this will use SSD-1B + LCM
+   * https://huggingface.co/spaces/jbilcke-hf/fast-image-server
+   */
+  turbo: boolean
+
   cache: CacheMode
 
   wait: boolean // wait until the job is completed
@@ -142,6 +150,7 @@ export type LayoutProps = {
 
 export type Settings = {
   renderingModelVendor: RenderingModelVendor
+  renderingUseTurbo: boolean
   huggingfaceApiKey: string
   huggingfaceInferenceApiModel: string
   huggingfaceInferenceApiModelTrigger: string
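End to end, the new setting flows from `Settings.renderingUseTurbo` (persisted under `CONF_RENDERING_USE_TURBO`) into `RenderRequest.turbo`, which the render server maps to the faster SSD-1B + LCM pipeline. A minimal illustration with both types reduced to the relevant fields:

```ts
// How the flag flows from Settings into a RenderRequest.
// Both types are simplified to the fields relevant here.
type Settings = { renderingUseTurbo: boolean }
type RenderRequest = { prompt: string; turbo: boolean }

function toRenderRequest(prompt: string, settings: Settings): RenderRequest {
  return {
    prompt,
    turbo: settings.renderingUseTurbo, // quality traded for speed
  }
}
```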