initial tuned 4k context length model (#84)

- add 4k context length model (43013a8f5eccffac1f83ec7d00e94b9ecb218380)

Co-authored-by: Haiping Wu <haipingwu@users.noreply.huggingface.co>
Author: ai-modelscope
Date: 2025-02-26 20:04:29 +08:00
Parent: c7a3388824
Commit: b7db1d9ea3

---
license: mit
license_link: https://huggingface.co/microsoft/Florence-2-large/resolve/main/LICENSE
pipeline_tag: image-text-to-text
tags:
- vision
---
## Model Summary
**This is a continued-pretrained version of the Florence-2-large model with a 4k context length. Only 0.1B samples were used for the continued pretraining, so it may not be fully trained. In addition, the OCR task has been updated to use a line separator ('\n'). COCO OD AP: 39.8.**
This Hub repository contains a Hugging Face `transformers` implementation of the Florence-2 model from Microsoft.
Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks. Florence-2 can interpret simple text prompts to perform tasks like captioning, object detection, and segmentation. It leverages our FLD-5B dataset, containing 5.4 billion annotations across 126 million images, to master multi-task learning. The model's sequence-to-sequence architecture enables it to excel in both zero-shot and fine-tuned settings, proving to be a competitive vision foundation model.
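Concretely, each task is selected with a special prompt token. A few of the common tokens used in this card and the sample notebook are sketched below; the descriptions are a summary, and the complete list lives in the notebook linked at the end of this card.

```python
# A few of Florence-2's task prompt tokens (see the sample notebook for
# the complete list); the model routes its behavior on these tokens alone.
TASK_PROMPTS = {
    "<CAPTION>": "brief image caption",
    "<DETAILED_CAPTION>": "longer, more detailed caption",
    "<OD>": "object detection (bounding boxes + labels)",
    "<OCR>": "plain text transcription",
    "<OCR_WITH_REGION>": "text transcription with region boxes",
}
```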
## How to Get Started with the Model
Use the code below to get started with the model. All models are trained with float16.
```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
prompt = "<OD>"
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=4096,
    num_beams=3,
    do_sample=False
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
parsed_answer = processor.post_process_generation(generated_text, task="<OD>", image_size=(image.width, image.height))
print(parsed_answer)
```
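The call returns a dict keyed by the task token. As an illustrative sketch of the shape (the field names match this card's examples; the values are not real predictions):

```python
# parsed_answer for "<OD>" has the shape
# {'<OD>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': ['car', ...]}}
detections = parsed_answer["<OD>"]
for box, label in zip(detections["bboxes"], detections["labels"]):
    print(label, [round(v, 1) for v in box])  # boxes are in pixel coordinates
```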
First, let's define a function to run a prompt.
```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
prompt = "<OD>"
url = "https://modelscope.oss-cn-beijing.aliyuncs.com/resource/car.jpg"
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=prompt, images=image, return_tensors="pt")
def run_example(task_prompt, text_input=None):
    if text_input is None:
        prompt = task_prompt
    else:
        prompt = task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
        do_sample=False
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
    print(parsed_answer)
```
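As a usage sketch, `run_example` can be called with the task prompt alone, or with extra text that is appended to the prompt, as in the phrase-grounding example from this card:

```python
# Caption the image (no extra text input).
run_example("<CAPTION>")

# Ground a phrase in the image; text_input is appended to the task prompt.
run_example("<CAPTION_TO_PHRASE_GROUNDING>", text_input="A green car parked in front of a yellow building.")
```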
```python
prompt = "<OCR_WITH_REGION>"
run_example(prompt)
```
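Since this 4k checkpoint emits a line separator for OCR (see the model summary), a plain `<OCR>` call is a natural check; the newline behavior described here is inferred from the summary, not verified output:

```python
# Plain OCR; with this checkpoint, output lines should be separated by '\n'.
run_example("<OCR>")
```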
### Output confidence score with Object Detection
```python
def run_example_with_score(task_prompt, text_input=None):
    if text_input is None:
        prompt = task_prompt
    else:
        prompt = task_prompt + text_input
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        num_beams=3,
        return_dict_in_generate=True,
        output_scores=True,
    )
    generated_text = processor.batch_decode(generated_ids.sequences, skip_special_tokens=False)[0]
    prediction, scores, beam_indices = generated_ids.sequences, generated_ids.scores, generated_ids.beam_indices
    transition_beam_scores = model.compute_transition_scores(
        sequences=prediction,
        scores=scores,
        beam_indices=beam_indices,
    )
    parsed_answer = processor.post_process_generation(
        sequence=generated_ids.sequences[0],
        transition_beam_score=transition_beam_scores[0],
        task=task_prompt,
        image_size=(image.width, image.height),
    )
    print(parsed_answer)

prompt = "<OD>"
run_example_with_score(prompt)
```
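To turn the per-token transition scores into a single confidence number, a common follow-up (a generic `transformers` recipe, not something this card prescribes) is to sum the log-probabilities along the chosen beam, e.g. inside `run_example_with_score` right after `compute_transition_scores`:

```python
# transition_beam_scores[0] holds per-token log-probabilities along the
# selected beam; their sum is the sequence log-probability.
seq_logprob = transition_beam_scores[0].sum()
print("sequence log-prob:", seq_logprob.item())         # closer to 0 = more confident
print("sequence prob:", torch.exp(seq_logprob).item())  # overall probability
```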
For more detailed examples, please refer to the [notebook](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb).