Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix CI #46

Merged
merged 8 commits into from
Mar 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions .github/workflows/python-app.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,6 @@ jobs:
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
cd tests && python pipe_test.py
28 changes: 20 additions & 8 deletions multigen/pipes.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import importlib
import logging
from enum import Enum

import torch
Expand Down Expand Up @@ -52,6 +53,7 @@ class BasePipe:
def __init__(self, model_id: str,
sd_pipe_class: Optional[Type[DiffusionPipeline]]=None,
pipe: Optional[DiffusionPipeline] = None, **args):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.pipe = pipe
self._scheduler = None
self._hypernets = []
Expand All @@ -60,8 +62,10 @@ def __init__(self, model_id: str,
# Creating a stable diffusion pipeine
args = {**args}
if 'torch_dtype' not in args:
args['torch_dtype']=torch.float16

if device == torch.device('cpu'):
args['torch_dtype'] = torch.float32
else:
args['torch_dtype'] = torch.float16
if self.pipe is None:
constructor_args = dict()
if isinstance(self, Cond2ImPipe):
Expand All @@ -86,15 +90,17 @@ def __init__(self, model_id: str,
self.pipe = sd_pipe_class.from_single_file(model_id, **args)
else:
self.pipe = sd_pipe_class.from_pretrained(model_id, **args)

self.pipe.to("cuda")
self.pipe.to(device)
# self.pipe.enable_attention_slicing()
# self.pipe.enable_vae_slicing()
self.pipe.vae.enable_tiling()
# --- the best one and seems to be enough ---
# self.pipe.enable_sequential_cpu_offload()
self.pipe.enable_xformers_memory_efficient_attention() # attention_op=MemoryEfficientAttentionFlashAttentionOp)
# self.pipe.vae.enable_xformers_memory_efficient_attention() # attention_op=None)
try:
import xformers
self.pipe.enable_xformers_memory_efficient_attention() # attention_op=MemoryEfficientAttentionFlashAttentionOp)
except ImportError as e:
logging.warning("xformers not found, can't use efficient attention")

@property
def scheduler(self):
Expand Down Expand Up @@ -338,7 +344,10 @@ def __init__(self, model_id, pipe: Optional[StableDiffusionControlNetPipeline] =
ctypes = [ctypes]
self.ctypes = ctypes
self._condition_image = None
dtype = torch.float16 if 'torch_type' not in args else args['torch_type']
dtype = torch.float32
if torch.cuda.is_available():
dtype = torch.float16
dtype = args.get('torch_type', dtype)
cpath = self.get_cpath()
cmodels = self.get_cmodels()
sd_class = self.get_sd_class()
Expand Down Expand Up @@ -492,7 +501,10 @@ class InpaintingPipe(BasePipe):

def __init__(self, model_id, pipe: Optional[StableDiffusionControlNetPipeline] = None,
**args):
dtype = torch.float16 if 'torch_type' not in args else args['torch_type']
dtype = torch.float32
if torch.cuda.is_available():
dtype = torch.float16
dtype = args.get('torch_type', dtype)
cnet = ControlNetModel.from_pretrained(
Cond2ImPipe.cpath+Cond2ImPipe.cmodels["inpaint"], torch_dtype=dtype)
super().__init__(model_id=model_id, pipe=pipe, controlnet=cnet, **args)
Expand Down
11 changes: 6 additions & 5 deletions tests/pipe_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def test_basic_txt2im(self):
model = self.get_model()
# create pipe
pipe = Prompt2ImPipe(model, pipe=self._pipeline)
pipe.setup(width=512, height=512, guidance_scale=7, scheduler="DPMSolverMultistepScheduler")
pipe.setup(width=512, height=512, guidance_scale=7, scheduler="DPMSolverMultistepScheduler", steps=5)
seed = 49045438434843
params = dict(prompt="a cube planet, cube-shaped, space photo, masterpiece",
negative_prompt="spherical",
Expand Down Expand Up @@ -65,16 +65,17 @@ def test_with_session(self):
["surrealism", "impressionism", "high tech", "cyberpunk"]]

pipe = Prompt2ImPipe(model, pipe=self._pipeline)
pipe.setup(width=512, height=512, scheduler="DPMSolverMultistepScheduler")
pipe.setup(width=512, height=512, scheduler="DPMSolverMultistepScheduler", steps=5)
# remove directory if it exists
dirname = "./gen_batch"
if os.path.exists(dirname):
shutil.rmtree(dirname)
# create session
gs = GenSession(dirname, pipe, Cfgen(prompt, nprompt))
gs.gen_sess(add_count=10)
gs.gen_sess(add_count=2)
# count number of generated files
self.assertEqual(len(os.listdir(dirname)), 20)
# each images goes with a txt file
self.assertEqual(len(os.listdir(dirname)), 4)

def test_loader(self):
loader = Loader()
Expand All @@ -87,7 +88,7 @@ def test_loader(self):
# create prompt2im pipe
pipeline = loader.load_pipeline(Prompt2ImPipe._class, model_id)
prompt2image = Prompt2ImPipe(model_id, pipe=pipeline)
prompt2image.setup(width=512, height=512, scheduler="DPMSolverMultistepScheduler", clip_skip=2)
prompt2image.setup(width=512, height=512, scheduler="DPMSolverMultistepScheduler", clip_skip=2, steps=5)

self.assertEqual(inpaint.pipe.unet.conv_out.weight.data_ptr(),
prompt2image.pipe.unet.conv_out.weight.data_ptr(),
Expand Down
Loading