Unverified Commit fc5fdc10 authored by Partho's avatar Partho Committed by GitHub
Browse files

[Doctest] Add `configuration_clip.py` (#19647)

* CLIP Config for doctest

* add doc example to CLIPConfig

* add from_text_vision_configs example

* added comment explaining objective
parent c9a0da1e
...@@ -80,12 +80,12 @@ class CLIPTextConfig(PretrainedConfig): ...@@ -80,12 +80,12 @@ class CLIPTextConfig(PretrainedConfig):
Example: Example:
```python ```python
>>> from transformers import CLIPTextModel, CLIPTextConfig >>> from transformers import CLIPTextConfig, CLIPTextModel
>>> # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPTextConfig() >>> configuration = CLIPTextConfig()
>>> # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPTextModel(configuration) >>> model = CLIPTextModel(configuration)
>>> # Accessing the model configuration >>> # Accessing the model configuration
...@@ -186,12 +186,12 @@ class CLIPVisionConfig(PretrainedConfig): ...@@ -186,12 +186,12 @@ class CLIPVisionConfig(PretrainedConfig):
Example: Example:
```python ```python
>>> from transformers import CLIPVisionModel, CLIPVisionConfig >>> from transformers import CLIPVisionConfig, CLIPVisionModel
>>> # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPVisionConfig() >>> configuration = CLIPVisionConfig()
>>> # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPVisionModel(configuration) >>> model = CLIPVisionModel(configuration)
>>> # Accessing the model configuration >>> # Accessing the model configuration
...@@ -270,7 +270,29 @@ class CLIPConfig(PretrainedConfig): ...@@ -270,7 +270,29 @@ class CLIPConfig(PretrainedConfig):
The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation. The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
kwargs (*optional*): kwargs (*optional*):
Dictionary of keyword arguments. Dictionary of keyword arguments.
"""
Example:
```python
>>> from transformers import CLIPConfig, CLIPModel
>>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPConfig()
>>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
>>> # Initializing a CLIPText and CLIPVision configuration
>>> config_text = CLIPTextConfig()
>>> config_vision = CLIPVisionConfig()
>>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
```"""
model_type = "clip" model_type = "clip"
is_composition = True is_composition = True
......
...@@ -33,6 +33,7 @@ src/transformers/models/blenderbot_small/configuration_blenderbot_small.py ...@@ -33,6 +33,7 @@ src/transformers/models/blenderbot_small/configuration_blenderbot_small.py
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
src/transformers/models/bloom/configuration_bloom.py src/transformers/models/bloom/configuration_bloom.py
src/transformers/models/canine/configuration_canine.py src/transformers/models/canine/configuration_canine.py
src/transformers/models/clip/configuration_clip.py
src/transformers/models/codegen/configuration_codegen.py src/transformers/models/codegen/configuration_codegen.py
src/transformers/models/conditional_detr/configuration_conditional_detr.py src/transformers/models/conditional_detr/configuration_conditional_detr.py
src/transformers/models/conditional_detr/modeling_conditional_detr.py src/transformers/models/conditional_detr/modeling_conditional_detr.py
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment