CheXagent
Collection
9 items • Updated • 1
How to use StanfordAIMI/CheXagent-2-3b with Transformers:
# Option A: run the model through the high-level pipeline helper.
from transformers import pipeline

chat = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline(
    "text-generation",
    model="StanfordAIMI/CheXagent-2-3b",
    trust_remote_code=True,
)
pipe(chat)

# Option B: load the tokenizer and model directly.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("StanfordAIMI/CheXagent-2-3b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-2-3b", trust_remote_code=True)

chat = [
    {"role": "user", "content": "Who are you?"},
]
# Tokenize the chat with the model's template and move tensors to the model's device.
inputs = tokenizer.apply_chat_template(
    chat,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

How to use StanfordAIMI/CheXagent-2-3b with vLLM:
# Install vLLM from pip:
pip install vllm
# Start the vLLM server (exposes an OpenAI-compatible API on port 8000;
# the model is downloaded from the Hugging Face Hub on first run):
vllm serve "StanfordAIMI/CheXagent-2-3b"
# Call the server using curl (OpenAI-compatible /v1/chat/completions endpoint):
curl -X POST "http://localhost:8000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "StanfordAIMI/CheXagent-2-3b",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'

docker model run hf.co/StanfordAIMI/CheXagent-2-3b
How to use StanfordAIMI/CheXagent-2-3b with SGLang:
# Install SGLang from pip:
pip install sglang
# Start the SGLang server (serves an OpenAI-compatible API on port 30000;
# the model is downloaded from the Hugging Face Hub on first run):
python3 -m sglang.launch_server \
--model-path "StanfordAIMI/CheXagent-2-3b" \
--host 0.0.0.0 \
--port 30000
# Call the server using curl (OpenAI-compatible /v1/chat/completions endpoint):
curl -X POST "http://localhost:30000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "StanfordAIMI/CheXagent-2-3b",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'

docker run --gpus all \
--shm-size 32g \
-p 30000:30000 \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HF_TOKEN=<secret>" \
--ipc=host \
lmsysorg/sglang:latest \
python3 -m sglang.launch_server \
--model-path "StanfordAIMI/CheXagent-2-3b" \
--host 0.0.0.0 \
--port 30000
# Call the dockerized SGLang server using curl (same OpenAI-compatible API as above):
curl -X POST "http://localhost:30000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "StanfordAIMI/CheXagent-2-3b",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'

How to use StanfordAIMI/CheXagent-2-3b with Docker Model Runner:
# Pull and run the model with Docker Model Runner:
docker model run hf.co/StanfordAIMI/CheXagent-2-3b
# Reference environment for local inference.
# NOTE(review): `python=3.10` is conda syntax, not pip — presumably the Python
# version for the environment; the remaining lines are pip requirements.
python=3.10
torch==2.7.1 # may work with more recent version
torchvision==0.22.1
transformers==4.40.0
opencv-python
albumentations
accelerate
Pillow
matplotlib
einops
pyarrow
sentencepiece
protobuf
Paper • Hugging Face • GitHub • Project
"""Minimal CheXagent-2-3b inference example: chest X-ray image(s) + text prompt."""
import io
import requests
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

# step 1: Setup constants
model_name = "StanfordAIMI/CheXagent-2-3b"
dtype = torch.bfloat16
device = "cuda"

# step 2: Load Processor and Model
# trust_remote_code is required: the model class and tokenizer helpers
# (e.g. `from_list_format`) live in the model repository's custom code.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
model = model.to(dtype)
model.eval()

# step 3: Define the inputs (fix: `paths` and `prompt` were previously undefined,
# so the snippet raised NameError). `paths` holds image locations; to prefetch a
# remote image yourself, e.g.:
#   image = Image.open(io.BytesIO(requests.get(url).content)).convert("RGB")
paths = ["path/to/chest_xray.png"]
prompt = "Describe the findings of the chest X-ray."

# step 4: Inference
# Build a multimodal query (images followed by the text prompt), wrap it in a
# chat conversation, and greedily decode up to 512 new tokens.
query = tokenizer.from_list_format([*[{'image': path} for path in paths], {'text': prompt}])
conv = [{"from": "system", "value": "You are a helpful assistant."}, {"from": "human", "value": query}]
input_ids = tokenizer.apply_chat_template(conv, add_generation_prompt=True, return_tensors="pt")
with torch.no_grad():
    output = model.generate(
        input_ids.to(device), do_sample=False, num_beams=1, temperature=1., top_p=1., use_cache=True,
        max_new_tokens=512
    )[0]
# Drop the prompt tokens and the trailing end-of-sequence token before decoding.
response = tokenizer.decode(output[input_ids.size(1):-1])
print(response)
@article{chexagent-2024,
title={CheXagent: Towards a Foundation Model for Chest X-Ray Interpretation},
author={Chen, Zhihong and Varma, Maya and Delbrouck, Jean-Benoit and Paschali, Magdalini and Blankemeier, Louis and Veen, Dave Van and Valanarasu, Jeya Maria Jose and Youssef, Alaa and Cohen, Joseph Paul and Reis, Eduardo Pontes and Tsai, Emily B. and Johnston, Andrew and Olsen, Cameron and Abraham, Tanishq Mathew and Gatidis, Sergios and Chaudhari, Akshay S and Langlotz, Curtis},
journal={arXiv preprint arXiv:2401.12208},
url={https://arxiv.org/abs/2401.12208},
year={2024}
}