Unverified Commit 2af199c4 authored by Raushan Turganbay, committed by GitHub
Browse files

Update docs (#32368)

nits
parent 82efc535
...@@ -137,7 +137,7 @@ from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig ...@@ -137,7 +137,7 @@ from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
quantization_config = BitsAndBytesConfig( quantization_config = BitsAndBytesConfig(
load_in_4bit=True, load_in_4bit=True,
bnb_4bit_quant_type="nf4", bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16, bnb_4bit_compute_dtype=torch.bfloat16,
) )
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda") model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda")
......
...@@ -1558,7 +1558,7 @@ class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel): ...@@ -1558,7 +1558,7 @@ class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel):
... "In which city is that bridge located?<image>", ... "In which city is that bridge located?<image>",
... ] ... ]
>>> images = [[image1, image2], [image3]] >>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, padding=True, return_tensors="pt").to("cuda") >>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to("cuda")
>>> # Generate >>> # Generate
>>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20) >>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment