Unverified Commit 4918eb6c authored by AUTOMATIC1111, committed by GitHub

Merge branch 'master' into hn-activation

parents 80844ac8 2cf3d2ac
...@@ -44,7 +44,7 @@ body:
id: commit
attributes:
label: Commit where the problem happens
- description: Which commit are you running ? (copy the **Commit hash** shown in the cmd/terminal when you launch the UI)
+ description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit hash** shown in the cmd/terminal when you launch the UI)
validations:
required: true
- type: dropdown
...
...@@ -29,4 +29,5 @@ notification.mp3
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt
* @AUTOMATIC1111
/localizations/ar_AR.json @xmodar @blackneoo
/localizations/de_DE.json @LunixWasTaken
/localizations/es_ES.json @innovaciones
/localizations/fr_FR.json @tumbly
/localizations/it_IT.json @EugenioBuffo
/localizations/ja_JP.json @yuuki76
/localizations/ko_KR.json @36DB
/localizations/pt_BR.json @M-art-ucci
/localizations/ru_RU.json @kabachuha
/localizations/tr_TR.json @camenduru
/localizations/zh_CN.json @dtlnor @bgluminous
/localizations/zh_TW.json @benlisquare
function extensions_apply(_, _){
disable = []
update = []
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
if(x.name.startsWith("enable_") && ! x.checked)
disable.push(x.name.substr(7))
if(x.name.startsWith("update_") && x.checked)
update.push(x.name.substr(7))
})
restart_reload()
return [JSON.stringify(disable), JSON.stringify(update)]
}
function extensions_check(){
gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
x.innerHTML = "Loading..."
})
return []
}
function install_extension_from_index(button, url){
button.disabled = "disabled"
button.value = "Installing..."
textarea = gradioApp().querySelector('#extension_to_install textarea')
textarea.value = url
textarea.dispatchEvent(new Event("input", { bubbles: true }))
gradioApp().querySelector('#install_extension_button').click()
}
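For context (not part of this diff): extensions_apply above collects two JSON arrays, the extensions to disable and the extensions to update, and hands them to the Gradio backend before triggering a restart. A minimal sketch of how a Python-side handler might decode them; the handler name apply_and_restart and what it does with the lists are assumptions, since the corresponding ui_extensions code is not shown in this excerpt.

    # Hypothetical Python handler for the two JSON strings produced by extensions_apply();
    # the function name and the follow-up actions are assumptions, not part of this diff.
    import json

    def apply_and_restart(disable_list_json, update_list_json):
        disabled = json.loads(disable_list_json)    # e.g. ["extension-a"]
        to_update = json.loads(update_list_json)    # e.g. ["extension-b"]
        print(f"disabling: {disabled}, updating: {to_update}")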
...@@ -75,6 +75,7 @@ titles = {
"Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",
"Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
"Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
"vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
...
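The "vram" tooltip above describes three numbers. As a rough illustration (not part of this diff), they correspond to the PyTorch CUDA queries below; treat this as a sketch, assuming a CUDA build of PyTorch and an available GPU.

    # Sketch only: reading the numbers described in the "vram" hint via PyTorch.
    import torch

    def vram_summary():
        active = torch.cuda.max_memory_allocated()    # "Torch active": peak memory held by tensors
        reserved = torch.cuda.max_memory_reserved()   # "Torch reserved": peak memory held by the caching allocator
        free, total = torch.cuda.mem_get_info()       # "Sys VRAM": driver-level view across all applications
        used = total - free
        mb = 1024 ** 2
        return {
            "torch_active_mb": active / mb,
            "torch_reserved_mb": reserved / mb,
            "sys_used_mb": used / mb,
            "sys_total_mb": total / mb,
            "sys_utilization_percent": 100.0 * used / total,
        }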
...@@ -13,6 +13,15 @@ function showModal(event) {
}
lb.style.display = "block";
lb.focus()
const tabTxt2Img = gradioApp().getElementById("tab_txt2img")
const tabImg2Img = gradioApp().getElementById("tab_img2img")
// show the save button in modal only on txt2img or img2img tabs
if (tabTxt2Img.style.display != "none" || tabImg2Img.style.display != "none") {
gradioApp().getElementById("modal_save").style.display = "inline"
} else {
gradioApp().getElementById("modal_save").style.display = "none"
}
event.stopPropagation()
}
...@@ -81,6 +90,25 @@ function modalImageSwitch(offset) {
}
}
function saveImage(){
const tabTxt2Img = gradioApp().getElementById("tab_txt2img")
const tabImg2Img = gradioApp().getElementById("tab_img2img")
const saveTxt2Img = "save_txt2img"
const saveImg2Img = "save_img2img"
if (tabTxt2Img.style.display != "none") {
gradioApp().getElementById(saveTxt2Img).click()
} else if (tabImg2Img.style.display != "none") {
gradioApp().getElementById(saveImg2Img).click()
} else {
console.error("missing implementation for saving modal of this type")
}
}
function modalSaveImage(event) {
saveImage()
event.stopPropagation()
}
function modalNextImage(event) {
modalImageSwitch(1)
event.stopPropagation()
...@@ -93,6 +121,9 @@ function modalPrevImage(event) {
function modalKeyHandler(event) {
switch (event.key) {
case "s":
saveImage()
break;
case "ArrowLeft":
modalPrevImage(event)
break;
...@@ -198,6 +229,14 @@ document.addEventListener("DOMContentLoaded", function() {
modalTileImage.title = "Preview tiling";
modalControls.appendChild(modalTileImage)
const modalSave = document.createElement("span")
modalSave.className = "modalSave cursor"
modalSave.id = "modal_save"
modalSave.innerHTML = "🖫"
modalSave.addEventListener("click", modalSaveImage, true)
modalSave.title = "Save Image(s)"
modalControls.appendChild(modalSave)
const modalClose = document.createElement('span')
modalClose.className = 'modalClose cursor';
modalClose.innerHTML = '×'
...
...@@ -3,8 +3,21 @@ global_progressbars = {}
galleries = {}
galleryObservers = {}
// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
timeoutIds = {}
function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
- var progressbar = gradioApp().getElementById(id_progressbar)
+ // gradio 3.8's enlightened approach allows them to create two nested div elements inside each other with same id
// every time you use gr.HTML(elem_id='xxx'), so we handle this here
var progressbar = gradioApp().querySelector("#"+id_progressbar+" #"+id_progressbar)
var progressbarParent
if(progressbar){
progressbarParent = gradioApp().querySelector("#"+id_progressbar)
} else{
progressbar = gradioApp().getElementById(id_progressbar)
progressbarParent = null
}
var skip = id_skip ? gradioApp().getElementById(id_skip) : null
var interrupt = gradioApp().getElementById(id_interrupt)
...@@ -26,18 +39,26 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
global_progressbars[id_progressbar] = progressbar
var mutationObserver = new MutationObserver(function(m){
if(timeoutIds[id_part]) return;
preview = gradioApp().getElementById(id_preview)
gallery = gradioApp().getElementById(id_gallery)
if(preview != null && gallery != null){
preview.style.width = gallery.clientWidth + "px"
preview.style.height = gallery.clientHeight + "px"
if(progressbarParent) progressbar.style.width = progressbarParent.clientWidth + "px"
//only watch gallery if there is a generation process going on
check_gallery(id_gallery);
var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- if(!progressDiv){
+ if(progressDiv){
timeoutIds[id_part] = window.setTimeout(function() {
timeoutIds[id_part] = null
requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt)
}, 500)
} else{
if (skip) {
skip.style.display = "none"
}
...@@ -47,13 +68,10 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
if (galleryObservers[id_gallery]) {
galleryObservers[id_gallery].disconnect();
galleries[id_gallery] = null;
}
}
}
window.setTimeout(function() { requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt) }, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
}
...
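The timeoutIds guard added above exists so the MutationObserver never schedules a second progress poll while one is already pending. For illustration only, the same guard pattern sketched in Python, with threading.Timer standing in for window.setTimeout:

    # Illustration of the timeoutIds guard pattern; not part of the diff.
    import threading

    timeout_ids = {}

    def request_more_progress(id_part):
        print(f"requesting more progress for {id_part}")

    def schedule_progress_poll(id_part, delay=0.5):
        if timeout_ids.get(id_part):       # a poll is already pending for this tab; do nothing
            return
        def fire():
            timeout_ids[id_part] = None    # clear the guard so the next mutation can schedule again
            request_more_progress(id_part)
        timer = threading.Timer(delay, fire)
        timeout_ids[id_part] = timer
        timer.start()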
...@@ -45,14 +45,14 @@ function switch_to_txt2img(){
return args_to_array(arguments);
}
- function switch_to_img2img_img2img(){
+ function switch_to_img2img(){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
gradioApp().getElementById('mode_img2img').querySelectorAll('button')[0].click();
return args_to_array(arguments);
}
- function switch_to_img2img_inpaint(){
+ function switch_to_inpaint(){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
gradioApp().getElementById('mode_img2img').querySelectorAll('button')[1].click();
...@@ -65,26 +65,6 @@ function switch_to_extras(){
return args_to_array(arguments);
}
function extract_image_from_gallery_txt2img(gallery){
switch_to_txt2img()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_img2img(gallery){
switch_to_img2img_img2img()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_inpaint(gallery){
switch_to_img2img_inpaint()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_extras(gallery){
switch_to_extras()
return extract_image_from_gallery(gallery);
}
function get_tab_index(tabId){
var res = 0
...
...@@ -7,6 +7,7 @@ import shlex
import platform
dir_repos = "repositories"
dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
...@@ -16,11 +17,11 @@ def extract_arg(args, name):
return [x for x in args if x != name], name in args
- def run(command, desc=None, errdesc=None):
+ def run(command, desc=None, errdesc=None, custom_env=None):
if desc is not None:
print(desc)
- result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
...@@ -101,9 +102,27 @@ def version_check(commit):
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
- print("versipm check failed",e)
+ print("version check failed", e)
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
if not os.path.isfile(path_installer):
continue
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
except Exception as e:
print(e, file=sys.stderr)
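run_extensions_installers() above runs each extension's install.py with PYTHONPATH pointing at the webui root, so such a script can import launch and reuse its helpers. A hypothetical extensions/<name>/install.py might look like the sketch below; launch.is_installed is an assumption (it is not shown in this excerpt), while run_pip is used elsewhere in this diff.

    # Hypothetical extensions/<name>/install.py; launch.is_installed is assumed to exist in launch.py.
    import launch

    if not launch.is_installed("gfpgan"):
        launch.run_pip("install gfpgan", "gfpgan requirement for this extension")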
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
...@@ -128,10 +147,12 @@ def prepare_enviroment():
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
test_argv = [x for x in sys.argv if x != '--tests']
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
...@@ -187,6 +208,8 @@ def prepare_enviroment():
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
run_extensions_installers()
if update_check:
version_check(commit)
...@@ -194,6 +217,26 @@ def prepare_enviroment():
print("Exiting because of --exit argument")
exit(0)
if run_tests:
tests(test_argv)
exit(0)
def tests(argv):
if "--api" not in argv:
argv.append("--api")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}")
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr)
import test.server_poll
test.server_poll.run_tests()
print(f"Stopping Web UI process with id {proc.pid}")
proc.kill()
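tests() launches the web UI in a child process and delegates to test.server_poll.run_tests(), which is not included in this excerpt. A minimal sketch of what such a poller could look like, assuming the UI listens on http://localhost:7860 and that test files match a *_test.py pattern (both assumptions):

    # Sketch of a hypothetical test/server_poll.py; URL and test-file pattern are assumptions.
    import time
    import unittest

    import requests

    def wait_for_server(url="http://localhost:7860/", timeout=300):
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                requests.get(url, timeout=5)
                return True
            except requests.exceptions.RequestException:
                time.sleep(1)
        return False

    def run_tests():
        if not wait_for_server():
            raise RuntimeError("web UI did not come up in time")
        suite = unittest.TestLoader().discover("test", pattern="*_test.py")
        unittest.TextTestRunner(verbosity=2).run(suite)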
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
...
- from array import array
- from inflection import underscore
- from typing import Any, Dict, Optional
+ import inspect
+ from click import prompt
from pydantic import BaseModel, Field, create_model
+ from typing import Any, Optional
+ from typing_extensions import Literal
+ from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
- import inspect
+ from modules.shared import sd_upscalers
API_NOT_ALLOWED = [
"self",
...@@ -51,17 +52,17 @@ class PydanticModelGenerator:
# field_type = str if not overrides.get(k) else overrides[k]["type"]
# print(k, v.annotation, v.default)
field_type = v.annotation
return Optional[field_type]
def merge_class_params(class_):
all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_)))
parameters = {}
for classes in all_classes:
parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
return parameters
self._model_name = model_name
self._class_data = merge_class_params(class_instance)
self._model_def = [
...@@ -73,11 +74,11 @@ class PydanticModelGenerator:
)
for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
]
for fields in additional_fields:
self._model_def.append(ModelDef(
field=underscore(fields["key"]),
field_alias=fields["key"],
field_type=fields["type"],
field_value=fields["default"],
field_exclude=fields["exclude"] if "exclude" in fields else False))
...@@ -94,15 +95,74 @@ class PydanticModelGenerator:
DynamicModel.__config__.allow_population_by_field_name = True
DynamicModel.__config__.allow_mutation = True
return DynamicModel
StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingTxt2Img",
StableDiffusionProcessingTxt2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}]
).generate_model()
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingImg2Img",
StableDiffusionProcessingImg2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
).generate_model()
\ No newline at end of file
class TextToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ImageToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ExtrasBaseRequest(BaseModel):
resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?")
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
class ExtrasSingleImageRequest(ExtrasBaseRequest):
image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
class ExtrasSingleImageResponse(ExtraBaseResponse):
image: str = Field(default=None, title="Image", description="The generated image in base64 format.")
class FileData(BaseModel):
data: str = Field(title="File data", description="Base64 representation of the file")
name: str = Field(title="File name")
class ExtrasBatchImagesRequest(ExtrasBaseRequest):
imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
class ExtrasBatchImagesResponse(ExtraBaseResponse):
images: list[str] = Field(title="Images", description="The generated images in base64 format.")
class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
class PNGInfoResponse(BaseModel):
info: str = Field(title="Image info", description="A string with all the info the image had")
class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
class ProgressResponse(BaseModel):
progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
eta_relative: float = Field(title="ETA in secs")
state: dict = Field(title="State", description="The current state snapshot")
current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
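The request/response models added above define the API payloads. As a usage sketch (not part of the diff), here is how ExtrasSingleImageRequest could be filled in and serialized client-side; the file name input.png is a placeholder, the module path modules.api.models is assumed, and the route that would receive the payload lives in the collapsed api.py diff.

    # Usage sketch for ExtrasSingleImageRequest; file name and module path are assumptions.
    import base64

    from modules.api.models import ExtrasSingleImageRequest

    with open("input.png", "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")

    payload = ExtrasSingleImageRequest(
        image=encoded,          # base64-encoded source image
        upscaling_resize=2,     # upscale 2x (resize_mode defaults to 0)
        upscaler_1="None",      # must be a name from shared.sd_upscalers
    )
    print(payload.json())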
...@@ -50,6 +50,7 @@ def mod2normal(state_dict):
def resrgan2normal(state_dict, nb=23):
# this code is copied from https://github.com/victorca25/iNNfer
if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
re8x = 0
crt_net = {}
items = []
for k, v in state_dict.items():
...@@ -75,10 +76,18 @@ def resrgan2normal(state_dict, nb=23):
crt_net['model.3.bias'] = state_dict['conv_up1.bias']
crt_net['model.6.weight'] = state_dict['conv_up2.weight']
crt_net['model.6.bias'] = state_dict['conv_up2.bias']
- crt_net['model.8.weight'] = state_dict['conv_hr.weight']
- crt_net['model.8.bias'] = state_dict['conv_hr.bias']
- crt_net['model.10.weight'] = state_dict['conv_last.weight']
- crt_net['model.10.bias'] = state_dict['conv_last.bias']
+ if 'conv_up3.weight' in state_dict:
+ # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
+ re8x = 3
+ crt_net['model.9.weight'] = state_dict['conv_up3.weight']
+ crt_net['model.9.bias'] = state_dict['conv_up3.bias']
+ crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
+ crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
+ crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
+ crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
state_dict = crt_net
return state_dict
...
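For illustration only (not part of the diff): the re8x offset above shifts the conv_hr and conv_last keys by three positions whenever the extra conv_up3 layer is present in the checkpoint, so both layouts map onto the same sequential naming. A tiny standalone demonstration of where those keys end up:

    # Demonstrates the key positions produced by the re8x offset logic; illustration only.
    def tail_keys(has_conv_up3):
        re8x = 3 if has_conv_up3 else 0
        keys = []
        if has_conv_up3:
            keys += ["model.9.weight", "model.9.bias"]                       # the extra conv_up3 layer
        keys += [f"model.{8 + re8x}.weight", f"model.{8 + re8x}.bias",       # conv_hr
                 f"model.{10 + re8x}.weight", f"model.{10 + re8x}.bias"]     # conv_last
        return keys

    print(tail_keys(False))  # ['model.8.weight', 'model.8.bias', 'model.10.weight', 'model.10.bias']
    print(tail_keys(True))   # ['model.9.weight', 'model.9.bias', 'model.11.weight', 'model.11.bias', 'model.13.weight', 'model.13.bias']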
...@@ -8,7 +8,8 @@ import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack, shared
from modules.hypernetworks import hypernetwork
- keys = list(hypernetwork.HypernetworkModule.activation_dict.keys())
+ not_available = ["hardswish", "multiheadattention"]
+ keys = ["linear"] + list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
# Remove illegal characters from name.
...
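The change above prepends "linear" to the selectable activations and filters out the ones listed in not_available. For illustration only, with a stand-in dictionary in place of hypernetwork.HypernetworkModule.activation_dict:

    # Stand-in example for the keys list construction; the dict below is hypothetical.
    activation_dict = {"relu": None, "swish": None, "hardswish": None, "multiheadattention": None}
    not_available = ["hardswish", "multiheadattention"]
    keys = ["linear"] + list(x for x in activation_dict.keys() if x not in not_available)
    print(keys)  # ['linear', 'relu', 'swish']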