Commit 3d61ea4d authored by chenpangpang's avatar chenpangpang
Browse files

feat: remove cache

parent af76935e
from transformers import pipeline, set_seed
import transformers

# Migrate any legacy Hugging Face cache layout to the current location.
transformers.utils.move_cache()

# GPU-backed GPT-2 text-generation pipeline.
generator = pipeline('text-generation', model='openai-community/gpt2', device='cuda')

# Produce a single continuation of the prompt, truncated at 30 tokens,
# and keep only the generated text of the first (only) sequence.
result = generator(
    "Hello, I'm a language model,",
    truncation=True,
    max_length=30,
    num_return_sequences=1,
)
output = result[0]["generated_text"]
......
# Setup for a Stable Diffusion text-to-image demo.
# NOTE(review): the script is truncated below this point — the pipeline
# construction presumably follows; confirm against the full file.
import torch
from diffusers import StableDiffusionPipeline
import transformers
# Migrate any legacy Hugging Face cache layout to the current location.
transformers.utils.move_cache()
# Hugging Face Hub id of the checkpoint to load.
model_id = "CompVis/stable-diffusion-v1-4"
# Target device; assumes a CUDA-capable GPU is available.
device = "cuda"
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment