Unverified Commit fe008d6e authored by Raushan Turganbay, committed by GitHub

Chameleon: not supported with fast load (#32091)

fixes
parent 62aa270f
@@ -761,7 +761,7 @@
   - local: model_doc/bros
     title: BROS
   - local: model_doc/chameleon
-    title: chameleon
+    title: Chameleon
   - local: model_doc/chinese_clip
     title: Chinese-CLIP
   - local: model_doc/clip
@@ -55,14 +55,14 @@ The original code can be found [here](https://github.com/facebookresearch/chamel
 - Chameleon generates in chat format which means that the generated text will always be the "assistant's turn". You can enable a text completion generation by passing `return_for_text_completion=True` when calling the processor.
 > [!NOTE]
-> Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. For special image token we didn't add a new one but used one of the reserved tokens: `<reserved08707>`.
+> Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. For special image token we didn't add a new one but used one of the reserved tokens: `<reserved08707>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
 ## Usage example
 ### Single image inference
 Chameleon is a gated model so make sure to have access and login to Hugging Face Hub using a token.
-Here's how to load the model and perform inference in half-precision (`torch.float16`):
+Here's how to load the model and perform inference in half-precision (`torch.bfloat16`):
 ```python
 from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
@@ -71,7 +71,7 @@ from PIL import Image
 import requests
 processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
-model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="cuda")
+model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
 # prepare image and text prompt
 url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
@@ -97,7 +97,7 @@ import requests
 processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
-model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16, device_map="cuda")
+model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
 # Get three different images
 url = "https://www.ilankelman.org/stopsigns/australia.jpg"
@@ -117,7 +117,7 @@ prompts = [
 # We can simply feed images in the order they have to be used in the text prompt
 # Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
-inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(device="cuda", dtype=torch.float16)
+inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
 # Generate
 generate_ids = model.generate(**inputs, max_new_tokens=50)
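For context, the `prompts` list referenced in the hunk header above is not part of the captured diff; a hypothetical version, showing how each `<image>` placeholder consumes one of the three images in order, might look like this:

```python
# Hypothetical prompts for the three images (image_stop, image_cats, image_snowman).
# Each "<image>" placeholder consumes the next image passed to the processor, in order.
prompts = [
    "What do these images have in common?<image><image>",
    "<image>What is shown in this image?",
]
```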
@@ -153,7 +153,7 @@ from transformers import ChameleonForConditionalGeneration
 model_id = "facebook/chameleon-7b"
 model = ChameleonForConditionalGeneration.from_pretrained(
     model_id,
-    torch_dtype=torch.float16,
+    torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
     attn_implementation="flash_attention_2"
 ).to(0)
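Putting the two documentation changes together (the required `<image>` placeholder and loading in `torch.bfloat16`), a minimal single-image sketch might look like the following. It assumes access to the gated facebook/chameleon-7b checkpoint and a CUDA device; the prompt text is illustrative:

```python
import requests
import torch
from PIL import Image
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration

processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained(
    "facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda"
)

# "<image>" marks where the image embeddings are merged into the prompt
prompt = "What do you see in this image?<image>"
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

inputs = processor(text=prompt, images=image, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
generate_ids = model.generate(**inputs, max_new_tokens=50)
print(processor.batch_decode(generate_ids, skip_special_tokens=True)[0])
```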
@@ -1096,6 +1096,7 @@ class ChameleonPreTrainedModel(PreTrainedModel):
     _supports_quantized_cache = True
     _supports_cache_class = True
     _supports_static_cache = True
+    _supports_param_buffer_assignment = False

     def _init_weights(self, module):
         std = self.config.initializer_range
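The new `_supports_param_buffer_assignment = False` flag appears to opt Chameleon out of the fast, assignment-based state-dict loading that the commit title refers to, so `from_pretrained` falls back to the regular copy-based path. The sketch below is plain PyTorch, not Transformers internals; it only illustrates the general difference between the two loading styles that such a flag toggles: with assignment, the module ends up holding the checkpoint's tensors (and their dtype), while with copying, the module's own parameters and dtype are preserved.

```python
import torch
import torch.nn as nn

# A float32 "checkpoint" and two bfloat16 modules to load it into.
state_dict = nn.Linear(4, 4).state_dict()

copied = nn.Linear(4, 4).to(torch.bfloat16)
copied.load_state_dict(state_dict)  # copy path: values are cast into the existing bfloat16 params

assigned = nn.Linear(4, 4).to(torch.bfloat16)
assigned.load_state_dict(state_dict, assign=True)  # assignment path (PyTorch 2.1+): the checkpoint's float32 tensors are used as-is

print(copied.weight.dtype, assigned.weight.dtype)  # torch.bfloat16 torch.float32
```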