Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #1

Merged
merged 139 commits into from
Jun 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
139 commits
Select commit Hold shift + click to select a range
2b717bb
fix initial corrupt model loop
w-e-w Apr 22, 2024
4bc39d2
Show LoRA if model is None
w-e-w Apr 22, 2024
246c269
add option to check file hash after download
w-e-w Apr 22, 2024
c69773d
ensure integrity for initial sd model download
w-e-w Apr 22, 2024
a1aa0af
add code for skipping CFG on early steps
drhead Apr 23, 2024
8016d78
add option for early cfg skip
drhead Apr 23, 2024
8326620
Add KL Optimal scheduler
drhead Apr 23, 2024
83182d2
change skip early cond option name and to float
drhead Apr 23, 2024
6e9b69a
change skip_early_cond code to use float
drhead Apr 23, 2024
33cbbf9
add s_min_uncond_all option
drhead Apr 23, 2024
029adbe
implement option to skip uncond on all steps below ngms
drhead Apr 23, 2024
50bb6e1
AVIF has quality setting too
pinanew Apr 23, 2024
8fa3fa7
fix exif_bytes referenced before assignment
w-e-w Apr 23, 2024
1091e3a
update jpeg_quality description
w-e-w Apr 23, 2024
e85e327
more extension tag filtering options
light-and-ray Apr 25, 2024
d5f6fdb
compact-checkbox-group
w-e-w Apr 26, 2024
3902aa2
Better error handling to skip non-standard ss_tag_frequency content
brendanhoar Apr 26, 2024
8dc9202
Better error handling when unable to read metadata from safetensors file
brendanhoar Apr 26, 2024
c5b7559
Better error handling when unable to extract contents of embedding/TI…
brendanhoar Apr 26, 2024
c5ae225
Better handling of embeddings with two rare, but not unusual, files i…
brendanhoar Apr 26, 2024
44afb48
Linter fix - extraneous whitespace
brendanhoar Apr 26, 2024
60c0799
Linter - except must not be bare.
brendanhoar Apr 26, 2024
9d964d3
no-referrer
w-e-w Apr 27, 2024
3a215de
vectorize kl-optimal sigma calculation
drhead Apr 28, 2024
3d3fc81
Add correct mimetype for .mjs files
huchenlei Apr 28, 2024
579f1ef
Allow old sampler names in API
missionfloyd Apr 29, 2024
4c7b22d
Fix dragging text within prompt input
missionfloyd Apr 29, 2024
c8336c4
Use existing function for old sampler names
missionfloyd Apr 30, 2024
9d39380
fix extra batch mode P Transparency
w-e-w Apr 30, 2024
89103b4
lora bundled TI infotext
w-e-w May 1, 2024
0e0e41e
use gradio theme colors in css
light-and-ray May 1, 2024
5d5224b
fix_p_invalid_sampler_and_scheduler
w-e-w May 1, 2024
7195c4d
two fingers press to open context menu
light-and-ray May 1, 2024
f12886a
use script_path for webui root in launch_utils
w-e-w May 4, 2024
dd93c47
Update imageviewer.js
bluelovers May 7, 2024
dbda59e
fix context menu position
light-and-ray May 7, 2024
e736c3b
Add files via upload
JLipnerPitt May 8, 2024
f7e349c
Add AVIF MIME type support to mimetype definitions
LoganBooker May 8, 2024
5fbac49
Added --models-dir option
MarcusNyne May 8, 2024
d2cc8cc
When creating a virtual environment, upgrade pip
MarcusNyne May 9, 2024
73d1caf
Add Align Your Steps to available schedulers
LoganBooker May 10, 2024
d6b4444
Use shared.sd_model.is_sdxl to determine base AYS sigmas
LoganBooker May 10, 2024
d44f241
use relative path for webui-assets css
w-e-w May 11, 2024
ef7713f
chore: sync v1.8.0 packages according to changelog, fix warning
elf-mouse May 14, 2024
5ab7d08
fix extension update when not on main branch
w-e-w May 15, 2024
022d835
use_checkpoint = False
huchenlei May 15, 2024
0e98529
Replace einops.rearrange with torch native
huchenlei May 15, 2024
9eb2f78
Precompute is_sdxl_inpaint flag
huchenlei May 15, 2024
6a48476
Fix flag check for SD15
huchenlei May 15, 2024
3e20b36
Fix attr access
huchenlei May 15, 2024
9c8075b
torch_utils.float64
w-e-w May 16, 2024
41f6684
mps, xpu compatibility
w-e-w May 16, 2024
f015b94
use torch_utils.float64
w-e-w May 16, 2024
51b13a8
Prevent unnecessary bias backup
huchenlei May 16, 2024
b2ae449
Fix LoRA bias error
huchenlei May 16, 2024
221ac0b
img2img batch upload method
light-and-ray May 16, 2024
58eec83
Fully prevent use_checkpoint
huchenlei May 16, 2024
2a8a60c
Add --precision half cmd option
huchenlei May 16, 2024
47f1d42
Fix for SD15 models
huchenlei May 17, 2024
10f2407
xyz csv skipinitialspace
w-e-w May 17, 2024
53d6708
Patch timestep embedding to create tensor on-device
drhead May 17, 2024
cc9ca67
Add transformer forward patch
drhead May 17, 2024
dca9007
Fix SD15 dtype
huchenlei May 17, 2024
b57a70f
Proper fix of SD15 dtype
huchenlei May 17, 2024
1d74482
Default device for sigma tensor to CPU
LoganBooker May 17, 2024
281e0a0
scroll extensions table on overflow
light-and-ray May 18, 2024
feeb680
fix case where first step skipped if skip early cond is 0
drhead May 18, 2024
501ac01
Reformat
w-e-w May 18, 2024
969a462
xyz util confirm_range
w-e-w May 18, 2024
24a59ad
fix Hypertile xyz grid
w-e-w May 18, 2024
82884da
use apply_override for Clip skip
w-e-w May 18, 2024
1f39251
use override for uni_pc_order
w-e-w May 18, 2024
1e696b0
use override of sd_vae
w-e-w May 18, 2024
51e7122
remove unused code
w-e-w May 18, 2024
5867be2
Use different torch versions for Intel and ARM Macs
viking1304 May 20, 2024
344eda5
ReloadUI backgroundColor --background-fill-primary
w-e-w May 22, 2024
6dd53ce
Fix bug where file extension had an extra '.' under some circumstances
alcacode May 26, 2024
8d6f741
#15883 -> #15882
w-e-w May 28, 2024
10f8d0f
feat: lora partial update precede full update.
eatmoreapple Jun 4, 2024
25bbf31
Fix for grids without comprehensive infotexts
NouberNou Jun 6, 2024
53f6267
Typo on edit
NouberNou Jun 6, 2024
3c7384a
Merge pull request #15958 from NouberNou/Fix-Grids-Without-Infotexts
AUTOMATIC1111 Jun 8, 2024
46bcfbe
Merge pull request #15751 from LoganBooker/LoganBooker-Add-AlignYourS…
AUTOMATIC1111 Jun 8, 2024
6de733c
Merge pull request #15943 from eatmoreapple/update-lora-load
AUTOMATIC1111 Jun 8, 2024
0c0d71a
Merge pull request #15907 from AUTOMATIC1111/fix-change-log
AUTOMATIC1111 Jun 8, 2024
de7f5cd
Merge pull request #15804 from huchenlei/rearrange_fix
AUTOMATIC1111 Jun 8, 2024
0769aa3
integrated edits as recommended in the PR #15804
AUTOMATIC1111 Jun 8, 2024
00f37ad
Merge pull request #15893 from alcacode/dev
AUTOMATIC1111 Jun 8, 2024
e21b1e3
Merge pull request #15864 from AUTOMATIC1111/ReloadUI-backgroundColor…
AUTOMATIC1111 Jun 8, 2024
0edc04d
Merge branch 'dev' into patch-2
AUTOMATIC1111 Jun 8, 2024
9e1fc80
Merge pull request #15608 from drhead/patch-2
AUTOMATIC1111 Jun 8, 2024
b150b3a
Merge pull request #15607 from drhead/patch-1
AUTOMATIC1111 Jun 8, 2024
5429e4c
add proper infotext support for #15607
AUTOMATIC1111 Jun 8, 2024
15245d9
Merge pull request #15600 from AUTOMATIC1111/fix-corrupt-model-loop
AUTOMATIC1111 Jun 8, 2024
cd9e9e4
remove unneeded tabulation
AUTOMATIC1111 Jun 8, 2024
ba54c74
Merge pull request #15656 from AUTOMATIC1111/api-old-sampler-names
AUTOMATIC1111 Jun 8, 2024
33b73c4
Merge pull request #15820 from huchenlei/force_half
AUTOMATIC1111 Jun 8, 2024
ebfc9f6
Merge branch 'dev' into patch-4
AUTOMATIC1111 Jun 8, 2024
93b53dc
Merge pull request #15824 from drhead/patch-4
AUTOMATIC1111 Jun 8, 2024
616013f
Merge pull request #15851 from viking1304/torch-on-mac
AUTOMATIC1111 Jun 8, 2024
cbac72d
Merge pull request #15836 from AUTOMATIC1111/xyz-override-rework
AUTOMATIC1111 Jun 8, 2024
c3c90de
Merge pull request #15681 from AUTOMATIC1111/fix_p_invalid_sampler_an…
AUTOMATIC1111 Jun 8, 2024
96f907e
Merge branch 'dev' into fix-Hypertile-xyz
AUTOMATIC1111 Jun 8, 2024
04164a8
Merge pull request #15831 from AUTOMATIC1111/fix-Hypertile-xyz
AUTOMATIC1111 Jun 8, 2024
5977cb0
Merge pull request #15832 from AUTOMATIC1111/xyz-csv-skipinitialspace
AUTOMATIC1111 Jun 8, 2024
510f025
replace wsl-open with wslpath and explorer.exe
w-e-w Jun 3, 2024
ad229fa
Merge pull request #15803 from huchenlei/checkpoint_false
AUTOMATIC1111 Jun 8, 2024
603509e
as per wfjsw's suggestion, revert changes for sd_hijack_checkpoint.py
AUTOMATIC1111 Jun 8, 2024
371cb60
Merge pull request #15830 from light-and-ray/scroll_extensions_table_…
AUTOMATIC1111 Jun 8, 2024
816bc42
Merge pull request #15816 from huchenlei/bias_backup
AUTOMATIC1111 Jun 8, 2024
64bf57b
Merge pull request #15817 from light-and-ray/img2img_batch_upload
AUTOMATIC1111 Jun 8, 2024
6450d24
Merge pull request #15806 from huchenlei/inpaint_fix
AUTOMATIC1111 Jun 8, 2024
b4723bb
Merge pull request #15815 from AUTOMATIC1111/torch-float64-or-float32
AUTOMATIC1111 Jun 8, 2024
7b940e3
Merge pull request #15797 from AUTOMATIC1111/fix-extention-update-whe…
AUTOMATIC1111 Jun 8, 2024
88a5001
Merge branch 'dev' into dev
AUTOMATIC1111 Jun 8, 2024
9905341
Merge pull request #15783 from elf-mouse/dev
AUTOMATIC1111 Jun 8, 2024
07cf95c
update pickle safe filenames
AUTOMATIC1111 Jun 8, 2024
64ebb24
Merge pull request #15757 from AUTOMATIC1111/fix-fonts-with-subpath-
AUTOMATIC1111 Jun 8, 2024
5abdf51
Merge pull request #15750 from MarcusNyne/m9-240509-pip-upgrade
AUTOMATIC1111 Jun 8, 2024
c1c4b3f
Merge pull request #15738 from JLipnerPitt/JLipnerPitt-patch-1
AUTOMATIC1111 Jun 8, 2024
64783dd
Merge pull request #15742 from MarcusNyne/m9-240508-model-dir
AUTOMATIC1111 Jun 8, 2024
1a7ffa2
remove extra local variable
AUTOMATIC1111 Jun 8, 2024
debc6dd
Merge pull request #15739 from LoganBooker/LoganBooker-AVIF-mimetype-…
AUTOMATIC1111 Jun 8, 2024
4aebfe9
Merge pull request #15730 from bluelovers/patch-2
AUTOMATIC1111 Jun 8, 2024
74b1fc6
Merge pull request #15682 from light-and-ray/two_fingers_press_to_ope…
AUTOMATIC1111 Jun 8, 2024
b9dfc50
Merge pull request #15705 from AUTOMATIC1111/use-script_path-for-webu…
AUTOMATIC1111 Jun 8, 2024
a1130c2
Merge pull request #15664 from AUTOMATIC1111/fix-extra-batch-mode-P-T…
AUTOMATIC1111 Jun 8, 2024
742bfbe
Merge pull request #15679 from AUTOMATIC1111/lora-bundled-TI-infotext
AUTOMATIC1111 Jun 8, 2024
41b24d3
Merge pull request #15680 from light-and-ray/use_gradio_theme_colors_…
AUTOMATIC1111 Jun 8, 2024
9e51031
Merge pull request #15641 from AUTOMATIC1111/no-referrer
AUTOMATIC1111 Jun 8, 2024
a184e5d
Merge pull request #15657 from AUTOMATIC1111/drag-text-fix
AUTOMATIC1111 Jun 8, 2024
569f17c
Merge pull request #15654 from huchenlei/mime
AUTOMATIC1111 Jun 8, 2024
30461be
Merge pull request #15602 from AUTOMATIC1111/initial-model-download-i…
AUTOMATIC1111 Jun 8, 2024
3ef9f27
Merge branch 'dev' into bgh-handle-metadata-issues-more-cleanly
AUTOMATIC1111 Jun 8, 2024
6d8d272
Merge pull request #15632 from brendanhoar/bgh-handle-metadata-issues…
AUTOMATIC1111 Jun 8, 2024
2dbc7aa
Merge pull request #15627 from light-and-ray/more_extension_tag_filte…
AUTOMATIC1111 Jun 8, 2024
5ecfc20
Merge pull request #15610 from pinanew/pinanew-patch-1
AUTOMATIC1111 Jun 8, 2024
194c262
Merge pull request #15968 from AUTOMATIC1111/wsl-open
AUTOMATIC1111 Jun 8, 2024
547778b
possibly make NaN check cheaper
AUTOMATIC1111 Jun 8, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
## 1.9.4

### Bug Fixes:
* pin setuptools version to fix the startup error ([#15883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15883))
* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))

## 1.9.3

Expand Down
2 changes: 1 addition & 1 deletion configs/alt-diffusion-inference.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ model:
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
use_checkpoint: False
legacy: False

first_stage_config:
Expand Down
2 changes: 1 addition & 1 deletion configs/alt-diffusion-m18-inference.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ model:
use_linear_in_transformer: True
transformer_depth: 1
context_dim: 1024
use_checkpoint: True
use_checkpoint: False
legacy: False

first_stage_config:
Expand Down
2 changes: 1 addition & 1 deletion configs/instruct-pix2pix.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ model:
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
use_checkpoint: False
legacy: False

first_stage_config:
Expand Down
2 changes: 1 addition & 1 deletion configs/sd_xl_inpaint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ model:
params:
adm_in_channels: 2816
num_classes: sequential
use_checkpoint: True
use_checkpoint: False
in_channels: 9
out_channels: 4
model_channels: 320
Expand Down
2 changes: 1 addition & 1 deletion configs/v1-inference.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ model:
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
use_checkpoint: False
legacy: False

first_stage_config:
Expand Down
2 changes: 1 addition & 1 deletion configs/v1-inpainting-inference.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ model:
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
use_checkpoint: False
legacy: False

first_stage_config:
Expand Down
56 changes: 45 additions & 11 deletions extensions-builtin/Lora/networks.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,14 @@ def assign_network_names_to_compvis_modules(sd_model):
sd_model.network_layer_mapping = network_layer_mapping


class BundledTIHash(str):
    """str subclass holding the hash of a Lora-bundled Textual Inversion.

    The stored hash is only rendered into infotext when the
    ``lora_bundled_ti_to_infotext`` option is enabled; otherwise the
    value stringifies to the empty string.
    """

    def __init__(self, hash_str):
        # Keep the raw hash regardless of the option; the option is
        # consulted lazily at render time in __str__.
        self.hash = hash_str

    def __str__(self):
        if shared.opts.lora_bundled_ti_to_infotext:
            return self.hash
        return ''


def load_network(name, network_on_disk):
net = network.Network(name, network_on_disk)
net.mtime = os.path.getmtime(network_on_disk.filename)
Expand Down Expand Up @@ -229,6 +237,7 @@ def load_network(name, network_on_disk):
for emb_name, data in bundle_embeddings.items():
embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
embedding.loaded = None
embedding.shorthash = BundledTIHash(name)
embeddings[emb_name] = embedding

net.bundle_embeddings = embeddings
Expand Down Expand Up @@ -260,6 +269,16 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No

loaded_networks.clear()

unavailable_networks = []
for name in names:
if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
unavailable_networks.append(name)
elif available_network_aliases.get(name) is None:
unavailable_networks.append(name)

if unavailable_networks:
update_available_networks_by_names(unavailable_networks)

networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
if any(x is None for x in networks_on_disk):
list_available_networks()
Expand Down Expand Up @@ -378,13 +397,18 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
self.network_weights_backup = weights_backup

bias_backup = getattr(self, "network_bias_backup", None)
if bias_backup is None:
if bias_backup is None and wanted_names != ():
if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
elif getattr(self, 'bias', None) is not None:
bias_backup = self.bias.to(devices.cpu, copy=True)
else:
bias_backup = None

# Unlike weight which always has value, some modules don't have bias.
# Only report if bias is not None and current bias are not unchanged.
if bias_backup is not None and current_names != ():
raise RuntimeError("no backup bias found and current bias are not unchanged")
self.network_bias_backup = bias_backup

if current_names != wanted_names:
Expand Down Expand Up @@ -566,22 +590,16 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)


def list_available_networks():
available_networks.clear()
available_network_aliases.clear()
forbidden_network_aliases.clear()
available_network_hash_lookup.clear()
forbidden_network_aliases.update({"none": 1, "Addams": 1})

os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

def process_network_files(names: list[str] | None = None):
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in candidates:
if os.path.isdir(filename):
continue

name = os.path.splitext(os.path.basename(filename))[0]
# if names is provided, only load networks with names in the list
if names and name not in names:
continue
try:
entry = network.NetworkOnDisk(name, filename)
except OSError: # should catch FileNotFoundError and PermissionError etc.
Expand All @@ -597,6 +615,22 @@ def list_available_networks():
available_network_aliases[entry.alias] = entry


def update_available_networks_by_names(names: list[str]):
    """Refresh registry entries only for the networks named in ``names``.

    Thin wrapper around process_network_files() that avoids a full rescan
    of the Lora directories when just a few entries need (re)loading.
    """
    process_network_files(names)


def list_available_networks():
    """Rebuild all network registries from scratch by scanning disk.

    Clears every module-level lookup table, re-seeds the forbidden alias
    set, makes sure the Lora directory exists, then repopulates the
    registries via process_network_files().
    """
    for registry in (
        available_networks,
        available_network_aliases,
        forbidden_network_aliases,
        available_network_hash_lookup,
    ):
        registry.clear()

    # "none" and "Addams" are reserved and may never be used as aliases.
    forbidden_network_aliases.update({"none": 1, "Addams": 1})

    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

    # No names given -> scan and register every network file found.
    process_network_files()


re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")


Expand Down
1 change: 1 addition & 0 deletions extensions-builtin/Lora/scripts/lora_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ def before_ui():
"sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
"lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
"lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'),
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
"lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
Expand Down
10 changes: 6 additions & 4 deletions extensions-builtin/Lora/ui_edit_user_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,12 @@ def is_non_comma_tagset(tags):
def build_tags(metadata):
tags = {}

for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
for tag, tag_count in tags_dict.items():
tag = tag.strip()
tags[tag] = tags.get(tag, 0) + int(tag_count)
ss_tag_frequency = metadata.get("ss_tag_frequency", {})
if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'):
for _, tags_dict in ss_tag_frequency.items():
for tag, tag_count in tags_dict.items():
tag = tag.strip()
tags[tag] = tags.get(tag, 0) + int(tag_count)

if tags and is_non_comma_tagset(tags):
new_tags = {}
Expand Down
2 changes: 1 addition & 1 deletion extensions-builtin/Lora/ui_extra_networks_lora.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def create_item(self, name, index=None, enable_filter=True):
else:
sd_version = lora_on_disk.sd_version

if shared.opts.lora_show_all or not enable_filter:
if shared.opts.lora_show_all or not enable_filter or not shared.sd_model:
pass
elif sd_version == network.SdVersion.Unknown:
model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
Expand Down
17 changes: 15 additions & 2 deletions extensions-builtin/hypertile/scripts/hypertile_script.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import hypertile
from modules import scripts, script_callbacks, shared
from scripts.hypertile_xyz import add_axis_options


class ScriptHypertile(scripts.Script):
Expand Down Expand Up @@ -93,7 +92,6 @@ def on_ui_settings():
"hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
"hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
"hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),

"hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
"hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
"hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),
Expand All @@ -105,5 +103,20 @@ def on_ui_settings():
shared.opts.add_option(name, opt)


def add_axis_options():
    """Register Hypertile settings as extra axes on the built-in xyz_grid script.

    Locates the xyz_grid module among loaded scripts and extends its axis
    list with boolean toggles and range-checked integer options for the
    U-Net and VAE Hypertile parameters.
    """
    xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module

    def toggle(label, option_name):
        # Enable/disable switches are string-valued boolean axes, listed True-first.
        return xyz_grid.AxisOption(label, str, xyz_grid.apply_override(option_name, boolean=True), choices=xyz_grid.boolean_choice(reverse=True))

    def ranged(label, option_name, upper, depth=False):
        # Integer axes validated against [0, upper]; depth axes also get fixed choices 0..3.
        kwargs = {'confirm': xyz_grid.confirm_range(0, upper, label)}
        if depth:
            kwargs['choices'] = lambda: [str(x) for x in range(4)]
        return xyz_grid.AxisOption(label, int, xyz_grid.apply_override(option_name), **kwargs)

    xyz_grid.axis_options.extend([
        toggle("[Hypertile] Unet First pass Enabled", 'hypertile_enable_unet'),
        toggle("[Hypertile] Unet Second pass Enabled", 'hypertile_enable_unet_secondpass'),
        ranged("[Hypertile] Unet Max Depth", "hypertile_max_depth_unet", 3, depth=True),
        ranged("[Hypertile] Unet Max Tile Size", "hypertile_max_tile_unet", 512),
        ranged("[Hypertile] Unet Swap Size", "hypertile_swap_size_unet", 64),
        toggle("[Hypertile] VAE Enabled", 'hypertile_enable_vae'),
        ranged("[Hypertile] VAE Max Depth", "hypertile_max_depth_vae", 3, depth=True),
        ranged("[Hypertile] VAE Max Tile Size", "hypertile_max_tile_vae", 512),
        ranged("[Hypertile] VAE Swap Size", "hypertile_swap_size_vae", 64),
    ])


script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_before_ui(add_axis_options)
51 changes: 0 additions & 51 deletions extensions-builtin/hypertile/scripts/hypertile_xyz.py

This file was deleted.

9 changes: 4 additions & 5 deletions extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import math
from modules.ui_components import InputAccordion
import modules.scripts as scripts
from modules.torch_utils import float64


class SoftInpaintingSettings:
Expand Down Expand Up @@ -79,13 +80,11 @@ def latent_blend(settings, a, b, t):

# Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
# 64-bit operations are used here to allow large exponents.
current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001)

# Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
settings.inpaint_detail_preservation) * one_minus_t3
b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
settings.inpaint_detail_preservation) * t3
a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3
b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3
desired_magnitude = a_magnitude
desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
del a_magnitude, b_magnitude, t3, one_minus_t3
Expand Down
49 changes: 18 additions & 31 deletions javascript/contextMenus.js
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,6 @@ var contextMenuInit = function() {
};

function showContextMenu(event, element, menuEntries) {
let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;

let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) {
oldMenu.remove();
Expand All @@ -23,10 +20,8 @@ var contextMenuInit = function() {
contextMenu.style.background = baseStyle.background;
contextMenu.style.color = baseStyle.color;
contextMenu.style.fontFamily = baseStyle.fontFamily;
contextMenu.style.top = posy + 'px';
contextMenu.style.left = posx + 'px';


contextMenu.style.top = event.pageY + 'px';
contextMenu.style.left = event.pageX + 'px';

const contextMenuList = document.createElement('ul');
contextMenuList.className = 'context-menu-items';
Expand All @@ -43,21 +38,6 @@ var contextMenuInit = function() {
});

gradioApp().appendChild(contextMenu);

let menuWidth = contextMenu.offsetWidth + 4;
let menuHeight = contextMenu.offsetHeight + 4;

let windowWidth = window.innerWidth;
let windowHeight = window.innerHeight;

if ((windowWidth - posx) < menuWidth) {
contextMenu.style.left = windowWidth - menuWidth + "px";
}

if ((windowHeight - posy) < menuHeight) {
contextMenu.style.top = windowHeight - menuHeight + "px";
}

}

function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {
Expand Down Expand Up @@ -107,16 +87,23 @@ var contextMenuInit = function() {
oldMenu.remove();
}
});
gradioApp().addEventListener("contextmenu", function(e) {
let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) {
oldMenu.remove();
}
menuSpecs.forEach(function(v, k) {
if (e.composedPath()[0].matches(k)) {
showContextMenu(e, e.composedPath()[0], v);
e.preventDefault();
['contextmenu', 'touchstart'].forEach((eventType) => {
gradioApp().addEventListener(eventType, function(e) {
let ev = e;
if (eventType.startsWith('touch')) {
if (e.touches.length !== 2) return;
ev = e.touches[0];
}
let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) {
oldMenu.remove();
}
menuSpecs.forEach(function(v, k) {
if (e.composedPath()[0].matches(k)) {
showContextMenu(ev, e.composedPath()[0], v);
e.preventDefault();
}
});
});
});
eventListenerApplied = true;
Expand Down
Loading