Skip to content

Commit dfc25d3

Browse files
authored
Merge pull request #141 from VoltaML/experimental
v0.3.1 - bug fixes, non squashed merge for better track of changes
2 parents d2be5ca + ff48d6d commit dfc25d3

File tree

4 files changed

+12
-10
lines changed

4 files changed

+12
-10
lines changed

core/inference/pytorch/pipeline.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -719,6 +719,8 @@ def img2img(
719719
image: Union[torch.FloatTensor, PIL.Image.Image], # type: ignore
720720
prompt: Union[str, List[str]],
721721
generator: torch.Generator,
722+
height: int = 512,
723+
width: int = 512,
722724
negative_prompt: Optional[Union[str, List[str]]] = None,
723725
strength: float = 0.8,
724726
num_inference_steps: Optional[int] = 50,
@@ -795,6 +797,8 @@ def img2img(
795797
prompt=prompt,
796798
negative_prompt=negative_prompt,
797799
image=image,
800+
height=height,
801+
width=width,
798802
num_inference_steps=num_inference_steps, # type: ignore
799803
guidance_scale=guidance_scale, # type: ignore
800804
self_attention_scale=self_attention_scale,

core/inference/pytorch/pytorch.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -310,6 +310,8 @@ def txt2img(self, job: Txt2ImgQueueEntry) -> List[Image.Image]:
310310
data = pipe.img2img(
311311
prompt=job.data.prompt,
312312
image=latents,
313+
height=latents.shape[2] * 8,
314+
width=latents.shape[3] * 8,
313315
num_inference_steps=flag.steps,
314316
guidance_scale=job.data.guidance_scale,
315317
self_attention_scale=job.data.self_attention_scale,
@@ -360,6 +362,8 @@ def img2img(self, job: Img2ImgQueueEntry) -> List[Image.Image]:
360362
data = pipe.img2img(
361363
prompt=job.data.prompt,
362364
image=input_image,
365+
height=job.data.height, # technically isn't needed, but it's here for consistency sake
366+
width=job.data.width, # technically isn't needed, but it's here for consistency sake
363367
num_inference_steps=job.data.steps,
364368
guidance_scale=job.data.guidance_scale,
365369
self_attention_scale=job.data.self_attention_scale,

core/inference/utilities/latents.py

Lines changed: 3 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -488,19 +488,13 @@ def scale_latents(
488488
):
489489
"Interpolate the latents to the desired scale."
490490

491-
align_to = (
492-
32 if latent_scale_mode in ["bislerp-tortured", "bislerp-original"] else 8
493-
)
494-
495491
s = time()
496492

497-
logger.debug(
498-
f"Scaling latents with shape {list(latents.shape)}, scale: {scale}, alignment: {align_to}"
499-
)
493+
logger.debug(f"Scaling latents with shape {list(latents.shape)}, scale: {scale}")
500494

501495
# Scale and round to multiple of 32
502-
width_truncated = int(((latents.shape[2] * scale - 1) // align_to + 1) * align_to)
503-
height_truncated = int(((latents.shape[3] * scale - 1) // align_to + 1) * align_to)
496+
width_truncated = int(latents.shape[2] * scale)
497+
height_truncated = int(latents.shape[3] * scale)
504498

505499
# Scale the latents
506500
if latent_scale_mode == "bislerp-tortured":

core/shared.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,4 +18,4 @@
1818

1919
uvicorn_server: Optional["Server"] = None
2020
uvicorn_loop: Optional[asyncio.AbstractEventLoop] = None
21-
asyncio_tasks: list[asyncio.Task] = []
21+
asyncio_tasks: List[asyncio.Task] = []

0 commit comments

Comments (0)