
Commit

Merge branch 'AUTOMATIC1111:dev' into mac-builds-experimental
brkirch committed Aug 20, 2023
2 parents 75ca805 + 9d2299e commit c3f84f9
Showing 22 changed files with 140 additions and 64 deletions.
21 changes: 2 additions & 19 deletions javascript/ui.js
@@ -19,28 +19,11 @@ function all_gallery_buttons() {
}

function selected_gallery_button() {
var allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small.selected');
var visibleCurrentButton = null;
allCurrentButtons.forEach(function(elem) {
if (elem.parentElement.offsetParent) {
visibleCurrentButton = elem;
}
});
return visibleCurrentButton;
return all_gallery_buttons().find(elem => elem.classList.contains('selected')) ?? null;
}

function selected_gallery_index() {
var buttons = all_gallery_buttons();
var button = selected_gallery_button();

var result = -1;
buttons.forEach(function(v, i) {
if (v == button) {
result = i;
}
});

return result;
return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected'));
}

function extract_image_from_gallery(gallery) {
18 changes: 14 additions & 4 deletions modules/api/api.py
@@ -23,8 +23,7 @@
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
from modules.sd_vae import vae_dict
from modules.sd_models import unload_model_weights, reload_model_weights, checkpoint_aliases
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -57,6 +56,15 @@ def setUpscalers(req: dict):


def decode_base64_to_image(encoding):
if encoding.startswith("http://") or encoding.startswith("https://"):
import requests
response = requests.get(encoding, timeout=30, headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'})
try:
image = Image.open(BytesIO(response.content))
return image
except Exception as e:
raise HTTPException(status_code=500, detail="Invalid image url") from e

if encoding.startswith("data:image/"):
encoding = encoding.split(";")[1].split(",")[1]
try:
@@ -567,10 +575,12 @@ def get_latent_upscale_modes(self):
]

def get_sd_models(self):
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
import modules.sd_models as sd_models
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]

def get_sd_vaes(self):
return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
import modules.sd_vae as sd_vae
return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]

def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
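
A note on the modules/api/api.py change above: decode_base64_to_image now also accepts http(s) URLs, so API clients can pass an image URL wherever a base64-encoded image was previously required. A minimal sketch of such a request, assuming the webui API is enabled (--api) on the default local port and using a placeholder image URL:

```python
import requests

payload = {
    "init_images": ["https://example.com/input.png"],  # a URL now works in place of base64 data
    "prompt": "a watercolor painting of a lighthouse",
    "denoising_strength": 0.6,
    "steps": 20,
}

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/img2img", json=payload, timeout=120)
resp.raise_for_status()
print(len(resp.json()["images"]))  # generated images are returned as base64 strings
```
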
2 changes: 1 addition & 1 deletion modules/cmd_args.py
@@ -81,7 +81,7 @@
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it")
parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path])
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
15 changes: 13 additions & 2 deletions modules/images.py
@@ -355,7 +355,9 @@ class FilenameGenerator:
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
'prompt_hash': lambda self, *args: self.string_hash(self.prompt, *args),
'negative_prompt_hash': lambda self, *args: self.string_hash(self.p.negative_prompt, *args),
'full_prompt_hash': lambda self, *args: self.string_hash(f"{self.p.prompt} {self.p.negative_prompt}", *args), # a space in between to create a unique string
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
@@ -368,7 +370,8 @@ class FilenameGenerator:
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
'none': lambda self: '', # Overrides the default so you can get just the sequence number
'none': lambda self: '', # Overrides the default, so you can get just the sequence number
'image_hash': lambda self, *args: self.image_hash(*args) # accepts formats: [image_hash<length>] default full hash
}
default_time_format = '%Y%m%d%H%M%S'

@@ -448,6 +451,14 @@ def datetime(self, *args):

return sanitize_filename_part(formatted_time, replace_spaces=False)

def image_hash(self, *args):
length = int(args[0]) if (args and args[0] != "") else None
return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]

def string_hash(self, text, *args):
length = int(args[0]) if (args and args[0] != "") else 8
return hashlib.sha256(text.encode()).hexdigest()[0:length]

def apply(self, x):
res = ''

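
A note on the modules/images.py change above: the filename pattern list gains [negative_prompt_hash], [full_prompt_hash] and [image_hash], and the hash tags now accept an optional length, e.g. [prompt_hash<12>]. A standalone sketch of the truncation logic used by string_hash (illustrative, not the webui code itself):

```python
import hashlib

def string_hash(text: str, length: int = 8) -> str:
    # sha256 of the text, truncated to the requested number of hex characters
    return hashlib.sha256(text.encode()).hexdigest()[:length]

print(string_hash("a photo of a cat"))      # default: 8 hex characters
print(string_hash("a photo of a cat", 12))  # [prompt_hash<12>] style: 12 hex characters
```
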
6 changes: 2 additions & 4 deletions modules/img2img.py
@@ -122,15 +122,14 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
is_batch = mode == 5

if mode == 0: # img2img
image = init_img.convert("RGB")
image = init_img
mask = None
elif mode == 1: # img2img sketch
image = sketch.convert("RGB")
image = sketch
mask = None
elif mode == 2: # inpaint
image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
mask = processing.create_binary_mask(mask)
image = image.convert("RGB")
elif mode == 3: # inpaint sketch
image = inpaint_color_sketch
orig = inpaint_color_sketch_orig or inpaint_color_sketch
@@ -139,7 +138,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
blur = ImageFilter.GaussianBlur(mask_blur)
image = Image.composite(image.filter(blur), orig, mask.filter(blur))
image = image.convert("RGB")
elif mode == 4: # inpaint upload mask
image = init_img_inpaint
mask = init_mask_inpaint
2 changes: 1 addition & 1 deletion modules/launch_utils.py
@@ -246,7 +246,7 @@ def list_extensions(settings_file):
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')

if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions:
if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir):
return []

return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
32 changes: 16 additions & 16 deletions modules/processing.py
@@ -386,14 +386,14 @@ def get_token_merging_ratio(self, for_hr=False):
return self.token_merging_ratio or opts.token_merging_ratio

def setup_prompts(self):
if type(self.prompt) == list:
if isinstance(self.prompt,list):
self.all_prompts = self.prompt
elif type(self.negative_prompt) == list:
elif isinstance(self.negative_prompt, list):
self.all_prompts = [self.prompt] * len(self.negative_prompt)
else:
self.all_prompts = self.batch_size * self.n_iter * [self.prompt]

if type(self.negative_prompt) == list:
if isinstance(self.negative_prompt, list):
self.all_negative_prompts = self.negative_prompt
else:
self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
@@ -512,10 +512,10 @@ def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="",
self.s_noise = p.s_noise
self.s_min_uncond = p.s_min_uncond
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning

self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
@@ -702,11 +702,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}

try:
# after running refiner, the refiner model is not unloaded - webui swaps back to main model here
if shared.sd_model.sd_checkpoint_info.title != opts.sd_model_checkpoint:
sd_models.reload_model_weights()

# if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
# and if after running refiner, the refiner model is not unloaded - webui swaps back to main model here, if model over is present it will be reloaded afterwards
if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
p.override_settings.pop('sd_model_checkpoint', None)
sd_models.reload_model_weights()
@@ -741,7 +738,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

if type(p.prompt) == list:
if isinstance(p.prompt, list):
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
@@ -772,12 +769,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

p.setup_prompts()

if type(seed) == list:
if isinstance(seed, list):
p.all_seeds = seed
else:
p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

if type(subseed) == list:
if isinstance(subseed, list):
p.all_subseeds = subseed
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
@@ -1155,6 +1152,9 @@ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subs
devices.torch_gc()

def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
if shared.state.interrupted:
return samples

self.is_hr_pass = True

target_width = self.hr_upscale_to_x
@@ -1268,12 +1268,12 @@ def setup_prompts(self):
if self.hr_negative_prompt == '':
self.hr_negative_prompt = self.negative_prompt

if type(self.hr_prompt) == list:
if isinstance(self.hr_prompt, list):
self.all_hr_prompts = self.hr_prompt
else:
self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]

if type(self.hr_negative_prompt) == list:
if isinstance(self.hr_negative_prompt, list):
self.all_hr_negative_prompts = self.hr_negative_prompt
else:
self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
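
A note on the modules/processing.py change above: the type(x) == list comparisons were replaced with isinstance(x, list), which is the idiomatic check and also matches subclasses. A small illustration of the difference:

```python
class PromptList(list):
    """Hypothetical list subclass, used only to show the difference."""

prompts = PromptList(["a cat", "a dog"])

print(type(prompts) == list)      # False - exact type comparison rejects subclasses
print(isinstance(prompts, list))  # True  - isinstance accepts subclasses
```
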
2 changes: 1 addition & 1 deletion modules/prompt_parser.py
@@ -86,7 +86,7 @@ def alternate(self, args):
yield args[(step - 1) % len(args)]
def start(self, args):
def flatten(x):
if type(x) == str:
if isinstance(x, str):
yield x
else:
for gen in x:
26 changes: 26 additions & 0 deletions modules/script_callbacks.py
@@ -28,6 +28,15 @@ def __init__(self, image, p, filename, pnginfo):
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""


class ExtraNoiseParams:
def __init__(self, noise, x):
self.noise = noise
"""Random noise generated by the seed"""

self.x = x
"""Latent image representation of the image"""


class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond):
self.x = x
@@ -100,6 +109,7 @@ def __init__(self, imgs, cols, rows):
callbacks_ui_settings=[],
callbacks_before_image_saved=[],
callbacks_image_saved=[],
callbacks_extra_noise=[],
callbacks_cfg_denoiser=[],
callbacks_cfg_denoised=[],
callbacks_cfg_after_cfg=[],
@@ -189,6 +199,14 @@ def image_saved_callback(params: ImageSaveParams):
report_exception(c, 'image_saved_callback')


def extra_noise_callback(params: ExtraNoiseParams):
for c in callback_map['callbacks_extra_noise']:
try:
c.callback(params)
except Exception:
report_exception(c, 'callbacks_extra_noise')


def cfg_denoiser_callback(params: CFGDenoiserParams):
for c in callback_map['callbacks_cfg_denoiser']:
try:
@@ -367,6 +385,14 @@ def on_image_saved(callback):
add_callback(callback_map['callbacks_image_saved'], callback)


def on_extra_noise(callback):
"""register a function to be called before adding extra noise in img2img or hires fix;
The callback is called with one argument:
- params: ExtraNoiseParams - contains noise determined by seed and latent representation of image
"""
add_callback(callback_map['callbacks_extra_noise'], callback)


def on_cfg_denoiser(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument:
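
A note on the modules/script_callbacks.py change above: extensions can register for the new hook with on_extra_noise. A minimal sketch of such a callback (the script path and behaviour are illustrative only):

```python
# Hypothetical extension script, e.g. extensions/<name>/scripts/extra_noise_logger.py
from modules import script_callbacks

def log_extra_noise(params: script_callbacks.ExtraNoiseParams):
    # params.noise is the seed-determined noise, params.x the latent image representation
    print("extra noise:", tuple(params.noise.shape), "latent:", tuple(params.x.shape))

script_callbacks.on_extra_noise(log_extra_noise)
```
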
4 changes: 2 additions & 2 deletions modules/scripts.py
@@ -269,7 +269,7 @@ def elem_id(self, item_id):
"""helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""

need_tabname = self.show(True) == self.show(False)
tabkind = 'img2img' if self.is_img2img else 'txt2txt'
tabkind = 'img2img' if self.is_img2img else 'txt2img'
tabname = f"{tabkind}_" if need_tabname else ""
title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))

@@ -289,7 +289,7 @@ def elem_id(self, item_id):
"""helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""

need_tabname = self.show(True) == self.show(False)
tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
tabname = ('img2img' if self.is_img2img else 'txt2img') + "_" if need_tabname else ""

return f'{tabname}{item_id}'

16 changes: 15 additions & 1 deletion modules/sd_hijack.py
@@ -245,7 +245,21 @@ def flatten(el):
ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward

def undo_hijack(self, m):
if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
conditioner = getattr(m, 'conditioner', None)
if conditioner:
for i in range(len(conditioner.embedders)):
embedder = conditioner.embedders[i]
if isinstance(embedder, (sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords, sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords)):
embedder.wrapped.model.token_embedding = embedder.wrapped.model.token_embedding.wrapped
conditioner.embedders[i] = embedder.wrapped
if isinstance(embedder, sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords):
embedder.wrapped.transformer.text_model.embeddings.token_embedding = embedder.wrapped.transformer.text_model.embeddings.token_embedding.wrapped
conditioner.embedders[i] = embedder.wrapped

if hasattr(m, 'cond_stage_model'):
delattr(m, 'cond_stage_model')

elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped

elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
5 changes: 4 additions & 1 deletion modules/sd_models.py
@@ -343,7 +343,10 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
model.to(memory_format=torch.channels_last)
timer.record("apply channels_last")

if not shared.cmd_opts.no_half:
if shared.cmd_opts.no_half:
model.float()
timer.record("apply float()")
else:
vae = model.first_stage_model
depth_model = getattr(model, 'depth_model', None)

5 changes: 3 additions & 2 deletions modules/sd_samplers_common.py
@@ -36,7 +36,7 @@ def setup_img2img_steps(p, steps=None):

def samples_to_images_tensor(sample, approximation=None, model=None):
'''latents -> images [-1, 1]'''
if approximation is None:
if approximation is None or (shared.state.interrupted and opts.live_preview_fast_interrupt):
approximation = approximation_indexes.get(opts.show_progress_type, 0)

if approximation == 2:
@@ -49,7 +49,8 @@ def samples_to_images_tensor(sample, approximation=None, model=None):
else:
if model is None:
model = shared.sd_model
x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
with devices.without_autocast(): # fixes an issue with unstable VAEs that are flaky even in fp32
x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))

return x_sample

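
A note on the modules/sd_samplers_common.py change above: the VAE decode is now wrapped in devices.without_autocast(), which the in-line comment attributes to VAEs that are unstable when decoded under autocast. Outside the webui, the same intent can be expressed with torch's own autocast context; a hedged sketch, where decode_fn stands in for any VAE decode callable:

```python
import torch

def decode_without_autocast(decode_fn, latents):
    # Run the decode with autocast disabled and the latents upcast to fp32,
    # mirroring the intent of devices.without_autocast() in the diff above.
    with torch.no_grad(), torch.autocast("cuda", enabled=False):
        return decode_fn(latents.to(torch.float32))
```
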
(Diffs for the remaining 9 changed files are not shown.)

0 comments on commit c3f84f9
