## Model Details
This model is a mixed-bits INT4 quantized version of Qwen/Qwen3-Coder-30B-A3B-Instruct with group_size 128 and symmetric quantization, generated by intel/auto-round. Please follow the license of the original model.
## Quantization Strategy (Intel MoE Recipe)
| Layer Type | Bits | Notes |
|---|---|---|
| Expert layers (128 experts) | 4-bit | MoE expert MLPs |
| Non-expert layers (attention, gate) | 16-bit | Higher precision for quality |
| shared_expert_gate | 16-bit | Skipped (shape not divisible by 32) |
| lm_head | Original | Excluded by AutoRound |
## Model Size
| Precision | Model Size |
|---|---|
| Original BF16 | ~60 GB |
| Mixed INT4 | ~20 GB (~67% reduction) |
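As a rough sanity check, the size follows from the bit budget: 4-bit weights with one 16-bit scale per group of 128 cost about 4.125 bits per weight, while the 16-bit layers cost 2 bytes per parameter. A back-of-envelope sketch (the parameter split below is an assumption for illustration, not read from the model config):

```python
# Rough size estimate; parameter counts are assumptions, not config values.
expert_params = 29.0e9      # assumed: the bulk of the 30.5B total is in expert MLPs
other_params = 1.5e9        # assumed: attention, router gates, embeddings, lm_head

bits_per_int4_weight = 4 + 16 / 128   # 4-bit weight + FP16 scale per group of 128
est_bytes = expert_params * bits_per_int4_weight / 8 + other_params * 2  # others in BF16

print(f"~{est_bytes / 1e9:.0f} GB")  # ~18 GB, in the ballpark of the ~20 GB above
```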
## Quickstart
### vLLM Usage
vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs.
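If vLLM is not installed yet, it is available from PyPI (the `qwen3_coder` tool-call parser used below requires a reasonably recent release):

```bash
pip install -U vllm
```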
#### Directly talk to the model
The following script launches a local vLLM server for this model, waits until it is ready, and opens an interactive chat client:
```python
import argparse
import atexit
import json
import os
import shutil
import subprocess
import sys
import time
import urllib.error
import urllib.request


def multiline_input():
    print('User (type "END" on a single line to send, "exit" to quit):')
    lines = []
    while True:
        line = input()
        text = line.strip()
        if text.lower() in {"exit", "quit"}:
            return None
        if text == "END":
            break
        lines.append(line)
    return "\n".join(lines)


def resolve_client_host(host):
    # The server may bind to all interfaces; the client should still hit loopback.
    return "127.0.0.1" if host in {"0.0.0.0", "::"} else host


def launch_vllm(args, api_key):
    cmd = [
        "vllm",
        "serve",
        args.model,
        "--served-model-name",
        args.served_model_name,
        "--host",
        args.host,
        "--port",
        str(args.port),
        "--max-model-len",
        str(args.max_model_len),
        "--tool-call-parser",
        args.tool_call_parser,
        "--attention-backend",
        args.attention_backend,
        "--api-key",
        api_key,
    ]
    if args.enable_auto_tool_choice:
        cmd.append("--enable-auto-tool-choice")
    print("Launching vLLM:")
    print(" ".join(cmd))
    try:
        return subprocess.Popen(cmd)
    except FileNotFoundError as e:
        raise RuntimeError(
            "vllm command not found. Activate an environment that has vllm installed."
        ) from e


def stop_vllm(proc):
    if proc and proc.poll() is None:
        proc.terminate()
        try:
            proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            proc.kill()


def wait_vllm_ready(base_url, api_key, timeout_sec=180):
    deadline = time.time() + timeout_sec
    url = f"{base_url}/v1/models"
    req = urllib.request.Request(url=url, headers={"Authorization": f"Bearer {api_key}"})
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(req, timeout=3) as resp:
                if resp.status == 200:
                    return True
        except urllib.error.URLError:
            pass
        time.sleep(1)
    return False


def chat_once(base_url, model_name, messages, api_key):
    payload = {"model": model_name, "messages": messages}
    req = urllib.request.Request(
        url=f"{base_url}/v1/chat/completions",
        data=json.dumps(payload, ensure_ascii=False).encode("utf-8"),
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=600) as resp:
        data = json.loads(resp.read().decode("utf-8"))
    return data["choices"][0]["message"]


def chat_loop(base_url, model_name, api_key):
    print("\n===== Chat Started =====\n")
    messages = []
    while True:
        user_text = multiline_input()
        if user_text is None:
            break
        messages.append({"role": "user", "content": user_text})
        try:
            assistant_msg = chat_once(base_url, model_name, messages, api_key)
        except Exception as e:
            print(f"\nRequest failed: {e}\n")
            messages.pop()
            continue
        content = assistant_msg.get("content")
        tool_calls = assistant_msg.get("tool_calls")
        if content:
            print(f"\nAssistant:\n{content}\n")
        elif tool_calls:
            print("\nAssistant(tool_calls):")
            print(json.dumps(tool_calls, ensure_ascii=False, indent=2))
            print()
        else:
            print("\nAssistant:\n(empty response)\n")
        normalized_msg = {"role": "assistant", "content": content or ""}
        if tool_calls:
            normalized_msg["tool_calls"] = tool_calls
        messages.append(normalized_msg)


def build_client_command(args):
    cmd = [
        sys.executable,
        os.path.abspath(__file__),
        "--_client",
        "--model",
        args.model,
        "--served-model-name",
        args.served_model_name,
        "--host",
        args.host,
        "--port",
        str(args.port),
        "--max-model-len",
        str(args.max_model_len),
        "--tool-call-parser",
        args.tool_call_parser,
        "--attention-backend",
        args.attention_backend,
        "--enable-auto-tool-choice" if args.enable_auto_tool_choice else "--no-enable-auto-tool-choice",
    ]
    return cmd


def spawn_chat_terminal(args, api_key):
    client_cmd = build_client_command(args)
    env = os.environ.copy()
    env["VLLM_API_KEY"] = api_key
    terminal_cmd = None
    if os.name == "nt":
        # Open a new cmd window on Windows and keep it alive for interactive chat.
        terminal_cmd = [
            "cmd",
            "/c",
            "start",
            "",
            "cmd",
            "/k",
            subprocess.list2cmdline(client_cmd),
        ]
    elif shutil.which("gnome-terminal"):
        terminal_cmd = ["gnome-terminal", "--", *client_cmd]
    elif shutil.which("x-terminal-emulator"):
        terminal_cmd = ["x-terminal-emulator", "-e", *client_cmd]
    if not terminal_cmd:
        return False
    try:
        subprocess.Popen(terminal_cmd, env=env)
        return True
    except Exception as e:
        print(f"Failed to open a new terminal automatically: {e}")
        return False


def parse_args():
    parser = argparse.ArgumentParser(description="Minimal local vLLM chat script")
    parser.add_argument("--_client", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument("--model", default="YCWTG/Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound")
    parser.add_argument(
        "--served-model-name",
        default="YCWTG/Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound",
    )
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--max-model-len", type=int, default=106666)
    parser.add_argument(
        "--enable-auto-tool-choice",
        action=argparse.BooleanOptionalAction,
        default=True,
    )
    parser.add_argument("--tool-call-parser", default="qwen3_coder")
    parser.add_argument("--attention-backend", default="FLASHINFER")
    return parser.parse_args()


def main():
    args = parse_args()
    api_key = os.environ.get("VLLM_API_KEY") or "local-dev-key"
    base_url = f"http://{resolve_client_host(args.host)}:{args.port}"
    if args._client:
        # Client mode: connect to the already-running server and chat.
        chat_loop(base_url, args.served_model_name, api_key)
        return
    proc = launch_vllm(args, api_key)
    atexit.register(stop_vllm, proc)
    print(f"Waiting for service to become ready: {base_url}")
    if not wait_vllm_ready(base_url, api_key):
        print("vLLM startup timed out. Check server logs above.")
        stop_vllm(proc)
        sys.exit(1)
    if spawn_chat_terminal(args, api_key):
        print("Model is ready. Opened a new terminal for chat; this terminal keeps server logs.")
        print("Press Ctrl+C here to stop vLLM.")
        try:
            proc.wait()
        except KeyboardInterrupt:
            print("\nInterrupted. Stopping vLLM...")
    else:
        print("No supported terminal found. Falling back to chat in this terminal.")
        chat_loop(base_url, args.served_model_name, api_key)


if __name__ == "__main__":
    main()
```
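Saved as, say, `chat_with_vllm.py` (the filename is only illustrative), the script launches the server and opens the chat client in one step:

```bash
python chat_with_vllm.py
# or override defaults, e.g.:
python chat_with_vllm.py --port 8001 --max-model-len 32768
```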
#### Directly use the OpenAI-compatible API
The following command starts a server that exposes OpenAI-compatible API endpoints at http://localhost:8000/v1 (with `vllm serve`, the model is passed as a positional argument); see the vLLM documentation for more details.

```bash
vllm serve YCWTG/Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound --host localhost --port 8000 --max-model-len 106666 --enable-auto-tool-choice --tool-call-parser qwen3_coder
```
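Once the server is up, any OpenAI-compatible client can call it. A minimal sketch using the official `openai` Python package (the API key is an arbitrary placeholder unless the server was started with `--api-key`):

```python
# pip install openai
from openai import OpenAI

# Point the client at the local vLLM server started above.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="local-dev-key")

response = client.chat.completions.create(
    model="YCWTG/Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound",
    messages=[{"role": "user", "content": "Write a Python function that checks if a string is a palindrome."}],
)
print(response.choices[0].message.content)
```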
## Generate the Model

```python
import os

from auto_round import AutoRound

model_name = "Qwen/Qwen3-Coder-30B-A3B-Instruct"

# Build the layer config for mixed bits (Intel MoE recipe): expert MLPs stay
# at the 4-bit default; attention and router gates are kept at 16-bit.
layer_config = {}
for i in range(48):  # the model has 48 decoder layers
    prefix = f"model.layers.{i}"
    # Attention projections -> 16-bit
    for proj in ["q_proj", "k_proj", "v_proj", "o_proj"]:
        layer_config[f"{prefix}.self_attn.{proj}"] = {"bits": 16}
    # MoE router gate -> 16-bit
    layer_config[f"{prefix}.mlp.gate"] = {"bits": 16}

autoround = AutoRound(
    model_name,
    bits=4,  # default bit width, applied to the expert layers
    dataset="github-code-clean",
    group_size=128,
    sym=True,
    iters=1000,
    nsamples=512,
    lr=2e-3,
    layer_config=layer_config,
    low_gpu_mem_usage=True,
)

# Expand "~" explicitly; Python does not expand it in plain string paths.
output_dir = os.path.expanduser("~/model/YCWTG--Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound")
autoround.quantize_and_save(output_dir, format="auto_round")
```
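For quick local testing outside vLLM, checkpoints saved in the `auto_round` format can be loaded through `transformers` when `auto_round` is installed; a minimal sketch (importing `AutoRoundConfig` registers the quantization backend, per the AutoRound documentation):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from auto_round import AutoRoundConfig  # noqa: F401 -- registers the auto_round backend

model_id = "YCWTG/Qwen3-Coder-30B-A3B-Instruct-int4-mixed-AutoRound"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Write a binary search in Python."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```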
## Ethical Considerations and Limitations
The model can produce factually incorrect output and should not be relied on to produce factually accurate information. Because of the limitations of the pretrained model and the fine-tuning datasets, this model could generate lewd, biased, or otherwise offensive outputs.
Therefore, before deploying any applications of the model, developers should perform safety testing.
## Caveats and Recommendations
Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model.
Here are a couple of useful links to learn more about Intel's AI software:
- [Intel AutoRound](https://github.com/intel/auto-round)
- [Intel Neural Compressor](https://github.com/intel/neural-compressor)
## Disclaimer
The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes.
## Cite

```bibtex
@article{cheng2023optimize,
  title={Optimize weight rounding via signed gradient descent for the quantization of llms},
  author={Cheng, Wenhua and Zhang, Weiwei and Shen, Haihao and Cai, Yiyang and He, Xin and Lv, Kaokao and Liu, Yi},
  journal={arXiv preprint arXiv:2309.05516},
  year={2023}
}
```