Adding transformers as the library name (#4)

- Adding `transformers` as the library name (37fdde0d1f220c0286925927d2fe6718b22715de)


Co-authored-by: Aritra Roy Gosthipaty <ariG23498@users.noreply.huggingface.co> (batch 1/1)
This commit is contained in:
systemd
2026-03-02 01:30:47 +00:00
parent f3e6023a32
commit b35d4dd795

View File

@@ -1,6 +1,7 @@
 ---
 license: apache-2.0
 pipeline_tag: image-text-to-text
+library_name: transformers
 ---
 <a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;">
     <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/>
@@ -93,7 +94,7 @@ model = Qwen3VLForConditionalGeneration.from_pretrained(
 #     device_map="auto",
 # )
-processor = AutoProcessor.from_pretrained("Qwen/Qwen/Qwen3-VL-8B-Thinking")
+processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-8B-Thinking")
 messages = [
     {
@@ -116,6 +117,7 @@ inputs = processor.apply_chat_template(
     return_dict=True,
     return_tensors="pt"
 )
+inputs = inputs.to(model.device)
 # Inference: Generation of the output
 generated_ids = model.generate(**inputs, max_new_tokens=128)