Unverified Commit ab7af93e authored by Dynamic, committed by GitHub

Merge branch 'AUTOMATIC1111:master' into kr-localization

parents 2ce44fc4 9f79e59a
...@@ -29,3 +29,4 @@ notification.mp3
/textual_inversion
.vscode
/extensions
...@@ -72,7 +72,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args)
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args)
- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
- Generate forever option
- Training tab
- hypernetworks and embeddings options
...@@ -95,6 +95,16 @@ git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-grad
After running this command, make sure that you have `aesthetic-gradients` dir in webui's `extensions` directory and restart
the UI. The interface for Aesthetic Gradients should appear exactly the same as it was.
## Where is History/Image browser?!?!
Image browser is now an extension. You can install it using git:
```commandline
git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser extensions/images-browser
```
After running this command, make sure that you have `images-browser` dir in webui's `extensions` directory and restart
the UI. The interface for Image browser should appear exactly the same as it was.
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
...
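// Handles clicks on gallery thumbnails: collects the indices of items already
// hidden (deleted) so they can be re-hidden after Gradio re-renders, then
// records which image is selected.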
var images_history_click_image = function(){
    if (!this.classList.contains("transform")){
        var gallery = images_history_get_parent_by_class(this, "images_history_cantainor");
        var buttons = gallery.querySelectorAll(".gallery-item");
        var i = 0;
        var hidden_list = [];
        buttons.forEach(function(e){
            if (e.style.display == "none"){
                hidden_list.push(i);
            }
            i += 1;
        });
        if (hidden_list.length > 0){
            setTimeout(images_history_hide_buttons, 10, hidden_list, gallery);
        }
    }
    images_history_set_image_info(this);
}
function images_history_disabled_del(){
    gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){
        btn.setAttribute('disabled','disabled');
    });
}
function images_history_get_parent_by_class(item, class_name){
    var parent = item.parentElement;
    while(!parent.classList.contains(class_name)){
        parent = parent.parentElement;
    }
    return parent;
}
function images_history_get_parent_by_tagname(item, tagname){
    var parent = item.parentElement;
    tagname = tagname.toUpperCase();
    while(parent.tagName != tagname){
        parent = parent.parentElement;
    }
    return parent;
}
function images_history_hide_buttons(hidden_list, gallery){
    var buttons = gallery.querySelectorAll(".gallery-item");
    var num = 0;
    buttons.forEach(function(e){
        if (e.style.display == "none"){
            num += 1;
        }
    });
    if (num == hidden_list.length){
        setTimeout(images_history_hide_buttons, 10, hidden_list, gallery);
    }
    for (var i in hidden_list){
        buttons[hidden_list[i]].style.display = "none";
    }
}
function images_history_set_image_info(button){
    var buttons = images_history_get_parent_by_tagname(button, "DIV").querySelectorAll(".gallery-item");
    var index = -1;
    var i = 0;
    buttons.forEach(function(e){
        if(e == button){
            index = i;
        }
        if(e.style.display != "none"){
            i += 1;
        }
    });
    var gallery = images_history_get_parent_by_class(button, "images_history_cantainor");
    var set_btn = gallery.querySelector(".images_history_set_index");
    var curr_idx = set_btn.getAttribute("img_index");
    if (curr_idx != index) {
        set_btn.setAttribute("img_index", index);
        images_history_disabled_del();
    }
    set_btn.click();
}
function images_history_get_current_img(tabname, img_index, files){
    return [
        tabname,
        gradioApp().getElementById(tabname + '_images_history_set_index').getAttribute("img_index"),
        files
    ];
}
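// Hides del_num images starting at image_index. The gallery holds two DOM
// copies of every item, hence img_num = buttons.length / 2 and the paired
// display toggles; if the whole page is emptied, the page list is reloaded.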
function images_history_delete(del_num, tabname, image_index){
    image_index = parseInt(image_index);
    var tab = gradioApp().getElementById(tabname + '_images_history');
    var set_btn = tab.querySelector(".images_history_set_index");
    var buttons = [];
    tab.querySelectorAll(".gallery-item").forEach(function(e){
        if (e.style.display != 'none'){
            buttons.push(e);
        }
    });
    var img_num = buttons.length / 2;
    del_num = Math.min(img_num - image_index, del_num);
    if (img_num <= del_num){
        setTimeout(function(tabname){
            gradioApp().getElementById(tabname + '_images_history_renew_page').click();
        }, 30, tabname);
    } else {
        var next_img;
        for (var i = 0; i < del_num; i++){
            buttons[image_index + i].style.display = 'none';
            buttons[image_index + i + img_num].style.display = 'none';
            next_img = image_index + i + 1;
        }
        var btn;
        if (next_img >= img_num){
            btn = buttons[image_index - 1];
        } else {
            btn = buttons[next_img];
        }
        setTimeout(function(btn){btn.click()}, 30, btn);
    }
    images_history_disabled_del();
}
function images_history_turnpage(tabname){
    gradioApp().getElementById(tabname + '_images_history_del_button').setAttribute('disabled','disabled');
    var buttons = gradioApp().getElementById(tabname + '_images_history').querySelectorAll(".gallery-item");
    buttons.forEach(function(elem) {
        elem.style.display = 'block';
    });
}
function images_history_enable_del_buttons(){
    gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){
        btn.removeAttribute('disabled');
    });
}
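// One-time setup: tags each history tab's controls with the shared CSS
// classes used above and optionally preloads the first tab; retries every
// 500 ms until the Gradio DOM is ready.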
function images_history_init(){
    var tabnames = gradioApp().getElementById("images_history_tabnames_list");
    if (tabnames){
        images_history_tab_list = tabnames.querySelector("textarea").value.split(",");
        for (var i in images_history_tab_list){
            var tab = images_history_tab_list[i];
            gradioApp().getElementById(tab + '_images_history').classList.add("images_history_cantainor");
            gradioApp().getElementById(tab + '_images_history_set_index').classList.add("images_history_set_index");
            gradioApp().getElementById(tab + '_images_history_del_button').classList.add("images_history_del_button");
            gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery");
            gradioApp().getElementById(tab + "_images_history_start").setAttribute("style","padding:20px;font-size:25px");
        }
        //preload
        if (gradioApp().getElementById("images_history_preload").querySelector("input").checked){
            var tabs_box = gradioApp().getElementById("tab_images_history").querySelector("div").querySelector("div").querySelector("div");
            tabs_box.setAttribute("id", "images_history_tab");
            var tab_btns = tabs_box.querySelectorAll("button");
            for (var i in images_history_tab_list){
                var tabname = images_history_tab_list[i];
                tab_btns[i].setAttribute("tabname", tabname);
                tab_btns[i].addEventListener('click', function(){
                    var tabs_box = gradioApp().getElementById("images_history_tab");
                    if (!tabs_box.classList.contains(this.getAttribute("tabname"))) {
                        gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_start").click();
                        tabs_box.classList.add(this.getAttribute("tabname"));
                    }
                });
            }
            tab_btns[0].click();
        }
    } else {
        setTimeout(images_history_init, 500);
    }
}
var images_history_tab_list = "";
setTimeout(images_history_init, 500);
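// Gradio re-creates gallery items on every render, so a MutationObserver
// re-binds the thumbnail click handlers and the gallery close button's
// page-refresh handler whenever the DOM changes.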
document.addEventListener("DOMContentLoaded", function() {
    var mutationObserver = new MutationObserver(function(m){
        if (images_history_tab_list != ""){
            for (var i in images_history_tab_list){
                let tabname = images_history_tab_list[i];
                var buttons = gradioApp().querySelectorAll('#' + tabname + '_images_history .gallery-item');
                buttons.forEach(function(bnt){
                    bnt.addEventListener('click', images_history_click_image, true);
                });
                var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg");
                if (cls_btn){
                    cls_btn.addEventListener('click', function(){
                        gradioApp().getElementById(tabname + '_images_history_renew_page').click();
                    }, false);
                }
            }
        }
    });
    mutationObserver.observe(gradioApp(), { childList:true, subtree:true });
});
...@@ -111,7 +111,7 @@ def prepare_enviroment():
    gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
    clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
    deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
    xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
...
from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.sd_samplers import all_samplers
from modules.extras import run_pnginfo
...
...@@ -16,6 +16,7 @@ from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from collections import defaultdict, deque
from statistics import stdev, mean

class HypernetworkModule(torch.nn.Module):

...@@ -269,19 +270,18 @@ def stack_conds(conds):
    return torch.stack(conds)
def log_statistics(loss_info:dict, key, value):
    if key not in loss_info:
        loss_info[key] = [value]
    else:
        loss_info[key].append(value)
        if len(loss_info) > 1024:
            loss_info.pop(0)
def statistics(data):
    if len(data) < 2:
        std = 0
    else:
        std = stdev(data)
    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
    recent_data = data[-32:]
    if len(recent_data) < 2:
        std = 0
    else:
        std = stdev(recent_data)
    recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
    return total_information, recent_information
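The bounded-history bookkeeping that `log_statistics` handled by hand is taken over below by `defaultdict(lambda: deque(maxlen=1024))`: a deque with `maxlen` evicts its oldest element automatically, which is what the old `loss_info.pop(0)` attempted (incorrectly, since it popped the dictionary key `0` rather than the oldest sample). A minimal sketch of the pattern, with a made-up filename key:

```python
from collections import defaultdict, deque
from statistics import mean

# Each key retains at most its 1024 most recent loss values;
# appending to a full deque silently drops the oldest one.
loss_info = defaultdict(lambda: deque(maxlen=1024))

for loss in [0.31, 0.28, 0.30, 0.27]:
    loss_info["img_001.png"].append(loss)  # hypothetical filename key

print(mean(loss_info["img_001.png"]))  # rolling mean over the retained window
```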
...@@ -290,7 +290,7 @@ def report_statistics(loss_info:dict):
    for key in keys:
        try:
            print("Loss statistics for file " + key)
            info, recent = statistics(list(loss_info[key]))
            print(info)
            print(recent)
        except Exception as e:
...@@ -341,8 +341,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
        weight.requires_grad = True

    size = len(ds.indexes)
    loss_dict = defaultdict(lambda: deque(maxlen=1024))
    losses = torch.zeros((size,))
    previous_mean_losses = [0]
    previous_mean_loss = 0
    print("Mean loss of {} elements".format(size))
...@@ -364,7 +365,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
    for i, entries in pbar:
        hypernetwork.step = i + ititial_step
        if len(loss_dict) > 0:
            previous_mean_losses = [i[-1] for i in loss_dict.values()]
            previous_mean_loss = mean(previous_mean_losses)

        scheduler.apply(optimizer, hypernetwork.step)
        if scheduler.finished:
...@@ -383,7 +385,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
        losses[hypernetwork.step % losses.shape[0]] = loss.item()
        for entry in entries:
            loss_dict[entry.filename].append(loss.item())

        optimizer.zero_grad()
        weights[0].grad = None
...@@ -399,7 +401,13 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
            raise RuntimeError("Loss diverged.")

        pbar.set_description(f"dataset loss: {previous_mean_loss:.7f}")
        if len(previous_mean_losses) > 1:
            std = stdev(previous_mean_losses)
        else:
            std = 0
        dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
        pbar.set_description(dataset_loss_info)

        if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
            # Before saving, change name to match current checkpoint.
...
...@@ -47,6 +47,25 @@ def apply_color_correction(correction, image):
    return image


def apply_overlay(image, paste_loc, index, overlays):
    if overlays is None or index >= len(overlays):
        return image

    overlay = overlays[index]

    if paste_loc is not None:
        x, y, w, h = paste_loc
        base_image = Image.new('RGBA', (overlay.width, overlay.height))
        image = images.resize_image(1, image, w, h)
        base_image.paste(image, (x, y))
        image = base_image

    image = image.convert('RGBA')
    image.alpha_composite(overlay)
    image = image.convert('RGB')

    return image
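For intuition, here is a minimal PIL-only sketch of the same composite-back step, with made-up sizes and with plain `Image.resize` standing in for the webui's `images.resize_image`; `paste_loc` is the `(x, y, w, h)` rectangle the generated crop came from:

```python
from PIL import Image

# Hypothetical stand-ins: a 64x64 generated crop and a full-size RGBA
# overlay whose transparent region marks where the crop belongs.
crop = Image.new('RGB', (64, 64), 'red')
overlay = Image.new('RGBA', (128, 128), (0, 0, 0, 0))
paste_loc = (32, 32, 64, 64)  # x, y, w, h

x, y, w, h = paste_loc
base_image = Image.new('RGBA', (overlay.width, overlay.height))
base_image.paste(crop.resize((w, h)), (x, y))  # resize approximates images.resize_image
result = base_image.convert('RGBA')
result.alpha_composite(overlay)                # overlay wins wherever it is opaque
result = result.convert('RGB')
```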
def get_correct_sampler(p):
    if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
        return sd_samplers.samplers
...@@ -449,22 +468,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
            if p.color_corrections is not None and i < len(p.color_corrections):
                if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                    image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
                    images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
                image = apply_color_correction(p.color_corrections[i], image)

            image = apply_overlay(image, p.paste_to, i, p.overlay_images)

            if opts.samples_save and not p.do_not_save_samples:
                images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
...
...@@ -16,7 +16,7 @@ def model_loaded_callback(sd_model):
def ui_tabs_callback():
    res = []

    for callback in callbacks_ui_tabs:
        res += callback() or []
...
...@@ -80,12 +80,13 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--browse-all-images", action='store_true', help="Allow browsing all images by Image Browser", default=False)

cmd_opts = parser.parse_args()

restricted_opts = [
    "samples_filename_pattern",
    "directories_filename_pattern",
    "outdir_samples",
    "outdir_txt2img_samples",
    "outdir_img2img_samples",
...@@ -190,7 +191,8 @@ options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
    "samples_save": OptionInfo(True, "Always save all generated images"),
    "samples_format": OptionInfo('png', 'File format for images'),
    "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
    "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
    "grid_save": OptionInfo(True, "Always save all generated image grids"),
    "grid_format": OptionInfo('png', 'File format for grids'),
...@@ -225,8 +227,8 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
    "save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
    "grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
    "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
    "directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
    "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
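`hide_dirs` here is, assuming the usual upstream definition in modules/shared.py, a dict of Gradio component kwargs that hides these directory-related options when the webui is launched with `--hide-ui-dir-config`; roughly:

```python
# Assumed definition (modules/shared.py): options created with
# component_args=hide_dirs are rendered invisible when the
# --hide-ui-dir-config command-line flag is set.
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
```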
options_templates.update(options_section(('upscaling', "Upscaling"), {

...@@ -320,15 +322,6 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
    'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))

options_templates.update(options_section(('images-history', "Images Browser"), {
    #"images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure. This can greatly improve the speed of loading, but will change the original output directory structure"),
    "images_history_preload": OptionInfo(False, "Preload images at startup"),
    "images_history_num_per_page": OptionInfo(36, "Number of pictures displayed on each page"),
    "images_history_pages_num": OptionInfo(6, "Minimum number of pages per load"),
    "images_history_grid_num": OptionInfo(6, "Number of grids in each row"),
}))
class Options:
    data = None

...@@ -357,7 +350,7 @@ class Options:
    def save(self, filename):
        with open(filename, "w", encoding="utf8") as file:
            json.dump(self.data, file, indent=4)

    def same_type(self, x, y):
        if x is None or y is None:
...
...@@ -38,7 +38,6 @@ import modules.codeformer_model
import modules.generation_parameters_copypaste
import modules.gfpgan_model
import modules.hypernetworks.ui
import modules.images_history as img_his
import modules.ldsr_model
import modules.scripts
import modules.shared as shared
...@@ -51,12 +50,11 @@ from modules.sd_samplers import samplers, samplers_for_img2img
import modules.textual_inversion.ui
import modules.hypernetworks.ui
import modules.images_history as img_his

# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')

txt2img_paste_fields = []
img2img_paste_fields = []

if not cmd_opts.share and not cmd_opts.listen:
...@@ -786,6 +784,7 @@ def create_ui(wrap_gradio_gpu_call):
            ]
        )

        global txt2img_paste_fields
        txt2img_paste_fields = [
            (txt2img_prompt, "Prompt"),
            (txt2img_negative_prompt, "Negative prompt"),
...@@ -1056,6 +1055,7 @@ def create_ui(wrap_gradio_gpu_call):
            outputs=[prompt, negative_prompt, style1, style2],
        )

        global img2img_paste_fields
        img2img_paste_fields = [
            (img2img_prompt, "Prompt"),
            (img2img_negative_prompt, "Negative prompt"),
...@@ -1104,9 +1104,9 @@ def create_ui(wrap_gradio_gpu_call):
                    upscaling_resize_w = gr.Number(label="Width", value=512, precision=0)
                    upscaling_resize_h = gr.Number(label="Height", value=512, precision=0)
                    upscaling_crop = gr.Checkbox(label='Crop to fit', value=True)

                with gr.Group():
                    extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")

                with gr.Group():
                    extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
...@@ -1193,15 +1193,7 @@ def create_ui(wrap_gradio_gpu_call):
            inputs=[image],
            outputs=[html, generation_info, html2],
        )

    #images history
    images_history_switch_dict = {
        "fn": modules.generation_parameters_copypaste.connect_paste,
        "t2i": txt2img_paste_fields,
        "i2i": img2img_paste_fields
    }

    images_history = img_his.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict)

    with gr.Blocks() as modelmerger_interface:
        with gr.Row().style(equal_height=False):
            with gr.Column(variant='panel'):
...@@ -1650,7 +1642,6 @@ Requested path was: {f}
        (img2img_interface, "img2img", "img2img"),
        (extras_interface, "Extras", "extras"),
        (pnginfo_interface, "PNG Info", "pnginfo"),
        (images_history, "Image Browser", "images_history"),
        (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
        (train_interface, "Train", "ti"),
    ]
...@@ -1894,6 +1885,7 @@ def load_javascript(raw_response):
        javascript = f'<script>{jsfile.read()}</script>'

    scripts_list = modules.scripts.list_scripts("javascript", ".js")

    for basedir, filename, path in scripts_list:
        with open(path, "r", encoding="utf8") as jsfile:
            javascript += f"\n<!-- {filename} --><script>{jsfile.read()}</script>"
...
...@@ -9,7 +9,7 @@ from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path

from modules import devices, sd_samplers, upscaler
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
...@@ -73,6 +73,11 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):

def initialize():
    if cmd_opts.ui_debug_mode:
        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
        modules.scripts.load_scripts()
        return

    modelloader.cleanup_models()
    modules.sd_models.setup_model()
    codeformer.setup_model(cmd_opts.codeformer_models_path)
...