Not supported by LM Studio
This model is currently not supported by LM Studio. Could you contact their team to expedite the release of a new version that supports this model? We need to run the 128K version of Qwen3 in LM Studio!
The model can be loaded, but asking any question produces an error message like the following, and the provided prompt template does not work:
Failed to regenerate message
Error rendering prompt with jinja template: "Error: Parser Error: Expected closing statement token. OpenSquareBracket !== CloseStatement.
at _0xc1039d (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:227215)
at /Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:227817
at _0x570f07 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:230141)
at _0xb7a340 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:231644)
at /Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:227923
at _0x570f07 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:230141)
at _0xb7a340 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:232285)
at /Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:227923
at _0x570f07 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:230141)
at _0xb7a340 (/Applications/LM Studio.app/Contents/Resources/app/.webpack/lib/llmworker.js:114:231644)". This is usually an issue with the model's prompt template. If you are using a popular model, you can try to search the model under lmstudio-community, which will have fixed prompt templates. If you cannot find one, you are welcome to post this issue to our discord or issue tracker on GitHub. Alternatively, if you know how to write jinja templates, you can override the prompt template in My Models > model settings > Prompt Template.
The prompt template I used is from here: https://docs.unsloth.ai/basics/qwen3-how-to-run-and-fine-tune
<|im_start|>user\nWhat is 2+2?<|im_end|>\n<|im_start|>assistant\n
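For context, that one-liner is the manual ChatML form of a single-turn prompt: each message is wrapped in <|im_start|>role ... <|im_end|> markers, and the trailing <|im_start|>assistant header cues the model to answer. A minimal Python sketch of that expansion (an illustration only; it ignores tools and <think> handling):

# Minimal sketch of how the manual ChatML prompt above is assembled.
# Illustration only: it ignores tool calls and <think> reasoning blocks.
def render_chatml(messages, add_generation_prompt=True):
    parts = [f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n" for m in messages]
    if add_generation_prompt:
        parts.append("<|im_start|>assistant\n")  # cue the model to respond
    return "".join(parts)

# Prints the template above, with real newlines in place of the literal \n:
print(render_chatml([{"role": "user", "content": "What is 2+2?"}]))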
And I'm using LM Studio on a Mac with an M3 Max chip.
This model is currently not supported by LM Studio. Could you contact their team to expedite the release of a new version that supports this model? We need to run the 128K version of Qwen3 in LM Studio!
Hey, just curious: how did you conclude that this model is not supported by LM Studio? I use LM Studio as well. The model loads and starts writing its response correctly, but at some point (usually a couple of thousand tokens into the response) it bugs out and starts repeating itself seemingly endlessly, so I was wondering if you encountered the same issue. To illustrate, here is a snippet of the code it started generating but obviously didn't finish, as I had to cancel it after it began writing garbage.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF- 8">
<title>Mandelbrot Sphere Simulator</title>
<style>
html,body{
margin:0;
padding:0;
overflow:hidden;
height:100%;
width:100%;
}
canvas{
display:block;
margin:auto;
}
</style>
</head>
<body>
<canvas id="canvas"></canvas>
<script>
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl2");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
function resizeCanvas() {
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
}
window.addEventListener('resize', resizeCanvas);
// Normalize canvas aspect ratio to square sphere projection🌍
function getAspectMatrix() {
return mat4.scale(mat4.create(), mat4.create(), [canvas.width / canvas.height, canvas.width / canvas.height, canvas.width / canvas.height]);
}
// Basic math utilities
const mat4 = glMatrix.mat4;
const vec3 = glMatrix.vec3;
// Create shader
function createShader(gl, type, source) {
const shader = gl.createShader(type);
gl.shaderSource(shader, source);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.error(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
function createProgram(gl, vsShader, fsShader) {
const program = gl.createProgram();
gl.attachShader(program, vsShader);
gl.attachShader(program, fsShader);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error(gl.getProgramInfoLog(program));
gl.deleteProgram(program);
return null;
}
return program;
}
const vertexShaderSource = `
attribute vec2 position;
uniform mat4 projectionMatrix;
uniform mat4 modelMatrix;
uniform float zoom;
uniform vec2 offset;
varying vec2 fragCoord;
void main() {
vec4 pos = modelMatrix * vec4(position.xy, zoom, zoom);
fragCoord = projectionMatrix * pos;
gl_Position = fragCoord;
}
`;
const fragmentShaderSource = `
precision highp float;
uniform sampler2D mandelbrotMap;
uniform vec2 resolution;
uniform float time;
varying vec2 fragCoord;
void main() {
vec2 uv = fragCoord.xy / resolution.xy;
vec4 color = texture2D(mandelbrotMap, uv);
// Earth-like color palette
vec3 earthColor(vec4 color) {
vec3 col;
col = vec3(0.0);
float r = color.r;
if(r < 0.3) col = vec3(0.0); // Deep ocean
else if(r < 0.5) col = vec3(0.0, color.g * color.g * color.g * color.g * color.g * color.g * color.g * color.g); // Shallow ocean
else if(r < 0.7) col = vec3(color.r * color.r, color.g * color.g * color.g * color.g); // Land and deserts
else col = vec3(color.r * color.r * color.r * color.r); // White snow/ice
return col;
}
gl_FragColor = vec4(earthColor(texture2D(mandelbrotMap, uv)), color.a);
}
`;
// 2D Mandelbrot shader
const mandelbrotShaderSource = `
precision highp float;
uniform vec2 resolution;
uniform vec2 offset;
uniform float zoom;
uniform vec2 center;
void main() {
vec2 uv = resolution.xy * fract(gl_FragCoord / resolution.xy);
uv -= resolution.xy * zoom;
uv += offset;
vec2 uvCentered = uv / resolution.xy * zoom;
uvCentered -= center;
vec2 uvMapped = uvCentered;
float cx = uvMapped.x;
float cy = uvMapped.y;
float zx = cx;
float zy = cy;
float i = 0.0;
while(i < 100.0) {
if (zx*zx + zy*zy > 4.0) break;
float zxNext = zx*zx - zy*zy + cx;
float zyNext = zx*zy*2.0 + cy;
zx = zxNext;
zy = zyNext;
i += 1.0;
}
float escapeTime = clamp(i, 0.0,FRING 100.0000000000000123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123
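For reference, Qwen3's usage notes (including the unsloth page linked above) warn that greedy decoding can cause endless repetitions and recommend roughly temperature 0.6, top_p 0.95, and top_k 20. A minimal sketch of a request with those settings through LM Studio's OpenAI-compatible local server, assuming the default port 1234 and a hypothetical model id:

# Sketch: query the model through LM Studio's OpenAI-compatible local server
# with Qwen3's recommended "thinking mode" sampling settings.
# Assumptions: the server runs on LM Studio's default port 1234, and the
# model id below is hypothetical; use whatever id LM Studio lists for your copy.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

resp = client.chat.completions.create(
    model="qwen3-32b-128k",  # hypothetical model id
    messages=[{"role": "user", "content": "What is 2+2?"}],
    temperature=0.6,         # Qwen3 docs advise against greedy decoding
    top_p=0.95,
    max_tokens=1024,
)
# top_k=20 is not a standard OpenAI parameter; set it in LM Studio's
# sampling settings (or pass it via extra_body if the server accepts it).
print(resp.choices[0].message.content)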
This model is currently not supported by LM Studio. Could you contact their team to expedite the release of a new version that supports this model? We need to run the 128K version of Qwen3 in LM Studio!
The model can be loaded, but asking any question produces an error message like the following, and the provided prompt template does not work:
Failed to regenerate message
Error rendering prompt with jinja template: "Error: Parser Error: Expected closing statement token. OpenSquareBracket !== CloseStatement. [...]"
But this error only happens when you're connecting to the API server that LM Studio is running, doesn't it? In LM Studio, open the "My Models" section (the red open-folder icon on the left-hand side of the app's main screen), click the gear icon next to this model's name, and choose the "Prompt" tab on the right-hand side of the screen. In the bottom-right part of the screen you should see "Prompt Template" with two options: "Template (Jinja)" and "Manual". Make sure the first option, "Template (Jinja)", is selected and check the little text box under it. Does it have a bright red border with warnings or errors written under it in bright red? If so, the template is broken and you'll need to replace it manually. In that case, delete the template from that text box, leaving it completely blank, and paste the following template into it:
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for forward_message in messages %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- set message = messages[index] %}
{%- set tool_start = '<tool_response>' %}
{%- set tool_start_length = tool_start|length %}
{%- set start_of_message = message.content[:tool_start_length] %}
{%- set tool_end = '</tool_response>' %}
{%- set tool_end_length = tool_end|length %}
{%- set start_pos = (message.content|length) - tool_end_length %}
{%- if start_pos < 0 %}
{%- set start_pos = 0 %}
{%- endif %}
{%- set end_of_message = message.content[start_pos:] %}
{%- if ns.multi_step_tool and message.role == "user" and not(start_of_message == tool_start and end_of_message == tool_end) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set content = message.content %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is defined and message.reasoning_content is not none %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in message.content %}
{%- set content = (message.content.split('</think>')|last).lstrip('\n') %}
{%- set reasoning_content = (message.content.split('</think>')|first).rstrip('\n') %}
{%- set reasoning_content = (reasoning_content.split('<think>')|last).lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %}
Once you do that, you can go back to the main screen of the app, and it should also work correctly over the API server.
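If you want to sanity-check the template's syntax before pasting it in, you can try rendering it with Python's jinja2. LM Studio ships its own Jinja engine (the stack trace above comes from llmworker.js), so passing this check doesn't guarantee LM Studio will accept the template, but a failure usually points to a real syntax problem. A sketch, assuming you saved the template to a hypothetical file qwen3_template.jinja:

# Rough syntax check of the chat template using Python's jinja2 (pip install jinja2).
# Caveat: LM Studio uses its own JS-based Jinja engine, so this is only a proxy.
from jinja2 import Environment

with open("qwen3_template.jinja") as f:  # hypothetical file holding the template above
    source = f.read()

template = Environment().from_string(source)
rendered = template.render(
    messages=[{"role": "user", "content": "What is 2+2?"}],
    tools=None,
    add_generation_prompt=True,
)
print(rendered)  # expected: <|im_start|>user ... <|im_end|> then <|im_start|>assistant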
Hey, just curious: how did you conclude that this model is not supported by LM Studio? [...]
The model can be loaded, but asking any question produces an error message like the one quoted above, and the provided prompt template does not work.
@Tonight223 Have you tried this solution? https://huggingface.co/unsloth/Qwen3-32B-128K-GGUF/discussions/2#681143602c338e2de8695d71
This is a link to my very own post; what do you mean?
The link to your original post is this: https://huggingface.co/unsloth/Qwen3-32B-128K-GGUF/discussions/2#68109c753cfdf4e8dc883ce2.
The link to my post where I suggested you a solution is this: https://huggingface.co/unsloth/Qwen3-32B-128K-GGUF/discussions/2#681143602c338e2de8695d71
Sorry if it's a bit confusing; my post contains the content of your post at the beginning because I quoted you, so you need to scroll further down to see the content of my post. This screenshot shows the part of that post I'm referring to: