Unverified commit e64fdcf2, authored by pravdomil and committed by GitHub

Fix gmflow_dir (#6583)

* remove sys.path

* update readme
parent ec64f371
examples/community/README.md
@@ -3414,15 +3414,13 @@ pipeline(prompt, uncond, inverted_latent, guidance_scale=7.5, num_inference_step
 ### Rerender A Video
-This is the Diffusers implementation of zero-shot video-to-video translation pipeline [Rerender A Video](https://github.com/williamyang1991/Rerender_A_Video) (without Ebsynth postprocessing). To run the code, please install gmflow. Then modify the path in `examples/community/rerender_a_video.py`:
+This is the Diffusers implementation of zero-shot video-to-video translation pipeline [Rerender A Video](https://github.com/williamyang1991/Rerender_A_Video) (without Ebsynth postprocessing). To run the code, please install gmflow. Then modify the path in `gmflow_dir`. After that, you can run the pipeline with:
 ```py
+import sys
 gmflow_dir = "/path/to/gmflow"
-```
-After that, you can run the pipeline with:
-```py
+sys.path.insert(0, gmflow_dir)
 from diffusers import ControlNetModel, AutoencoderKL, DDIMScheduler
 from diffusers.utils import export_to_video
 import numpy as np
...
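With this README change, gmflow has to be importable before the pipeline module is loaded, because the path setup no longer happens inside `rerender_a_video.py`. A minimal sketch of the new calling pattern, assuming a local gmflow checkout and that the community pipeline is fetched by name via `custom_pipeline="rerender_a_video"`; the checkpoints below are illustrative choices, not prescribed by this commit:

```py
import sys

# Assumption: gmflow_dir points at a local gmflow checkout that contains the gmflow/ package.
gmflow_dir = "/path/to/gmflow"
sys.path.insert(0, gmflow_dir)

import torch
from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline

# Illustrative checkpoints for the sketch; substitute your own SD 1.5 base, ControlNet, and VAE.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

# Loading the community pipeline executes rerender_a_video.py, which now runs
# `from gmflow.gmflow import GMFlow` at module level, so gmflow must already be on sys.path.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    vae=vae,
    custom_pipeline="rerender_a_video",
    torch_dtype=torch.float16,
).to("cuda")
```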
examples/community/rerender_a_video.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -21,6 +20,7 @@ import PIL.Image
 import torch
 import torch.nn.functional as F
 import torchvision.transforms as T
+from gmflow.gmflow import GMFlow
 from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 from diffusers.image_processor import VaeImageProcessor
@@ -32,12 +32,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import BaseOutput, deprecate, logging
 from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
-gmflow_dir = "/path/to/gmflow"
-sys.path.insert(0, gmflow_dir)
-from gmflow.gmflow import GMFlow  # noqa: E402
 from utils.utils import InputPadder  # noqa: E402
...
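With the hard-coded path removed from the pipeline file, `GMFlow` is now a plain top-level import, so importing the module without gmflow on `sys.path` fails immediately with `ModuleNotFoundError: No module named 'gmflow'`. A small sketch that makes the ordering requirement explicit; the `ensure_gmflow` helper is my own illustration, not part of this commit or of diffusers:

```py
import importlib.util
import sys
from pathlib import Path


def ensure_gmflow(gmflow_dir: str) -> None:
    """Put a local gmflow checkout on sys.path if it is not already importable."""
    if importlib.util.find_spec("gmflow") is not None:
        return  # gmflow is already importable; nothing to do
    path = Path(gmflow_dir)
    # The checkout root is expected to contain the gmflow/ package, i.e. <gmflow_dir>/gmflow/gmflow.py.
    if not (path / "gmflow" / "gmflow.py").is_file():
        raise FileNotFoundError(f"gmflow checkout not found under {path}")
    sys.path.insert(0, str(path))


# Call this before loading the community pipeline, because rerender_a_video.py
# now executes `from gmflow.gmflow import GMFlow` at import time.
ensure_gmflow("/path/to/gmflow")
```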