From 75b62d6ca8abacb4174212cb17cb5a130c8a7f05 Mon Sep 17 00:00:00 2001
From: whosawhatsis
Date: Sat, 4 Feb 2023 19:56:20 -0800
Subject: [PATCH 1/5] Add --log_tokenization to sysargs

This allows the --log_tokenization option to be used as a command line
argument (or from invokeai.init), making it possible to view tokenization
information in the terminal when using the web interface.
---
 ldm/invoke/args.py         | 9 ++++++++-
 ldm/invoke/conditioning.py | 5 +++--
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py
index 3904d2f573d..47075654244 100644
--- a/ldm/invoke/args.py
+++ b/ldm/invoke/args.py
@@ -196,6 +196,7 @@ def parse_args(self):
         elif os.path.exists(legacyinit):
             print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
             sysargs.insert(0,f'@{legacyinit}')
+        Globals.log_tokenization = self._arg_parser.parse_args(sysargs).log_tokenization
         self._arg_switches = self._arg_parser.parse_args(sysargs)
         return self._arg_switches
 
@@ -599,6 +600,12 @@ def _create_arg_parser(self):
             help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
             default='k_lms',
         )
+        render_group.add_argument(
+            '--log_tokenization',
+            '-t',
+            action='store_true',
+            help='shows how the prompt is split into tokens'
+        )
         render_group.add_argument(
             '-f',
             '--strength',
@@ -744,7 +751,7 @@ def _create_dream_cmd_parser(self):
             invoke> !fetch 0000015.8929913.png
             invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
             invoke> !fetch /path/to/images/*.png prompts.txt
-            
+
             !replay /path/to/prompts.txt
             Replays all the prompts contained in the file prompts.txt.
 
diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py
index fec3c7e7b1f..54ed10bc574 100644
--- a/ldm/invoke/conditioning.py
+++ b/ldm/invoke/conditioning.py
@@ -17,6 +17,7 @@
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder
 from ..modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
+from ldm.invoke.globals import Globals
 
 
 def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
@@ -92,7 +93,7 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
 
     """
-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         print(f">> Parsed prompt to {parsed_prompt}")
         print(f">> Parsed negative prompt to {parsed_negative_prompt}")
 
@@ -235,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)
 

From f1dd76c20b9dd853aa36f95e066c235a4c33f589 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:10 +1300
Subject: [PATCH 2/5] Remove Deprecation Warning from Diffusers Pipeline

---
 ldm/invoke/generator/diffusers_pipeline.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py
index a63159b118e..f8efe03762c 100644
--- a/ldm/invoke/generator/diffusers_pipeline.py
+++ b/ldm/invoke/generator/diffusers_pipeline.py
@@ -4,7 +4,6 @@
 import inspect
 import secrets
 import sys
-import warnings
 from dataclasses import dataclass, field
 from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any
 
@@ -641,7 +640,6 @@ def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fr
 
     @property
     def cond_stage_model(self):
-        warnings.warn("legacy compatibility layer", DeprecationWarning)
         return self.prompt_fragments_to_embeddings_converter
 
     @torch.inference_mode()

From f7532cdfd4b0ea08ef5d2c196815583d58992128 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:29 +1300
Subject: [PATCH 3/5] Beautify Token Log Outputs

---
 ldm/invoke/conditioning.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py
index 54ed10bc574..99722ff388f 100644
--- a/ldm/invoke/conditioning.py
+++ b/ldm/invoke/conditioning.py
@@ -94,8 +94,8 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     """
     if log_tokens or Globals.log_tokenization:
-        print(f">> Parsed prompt to {parsed_prompt}")
-        print(f">> Parsed negative prompt to {parsed_negative_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
 
     conditioning = None
     cac_args: cross_attention_control.Arguments = None
 
@@ -274,12 +274,12 @@ def log_tokenization(text, model, display_label=None):
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
     """
-
     tokens = model.cond_stage_model.tokenizer.tokenize(text)
     tokenized = ""
     discarded = ""
     usedTokens = 0
     totalTokens = len(tokens)
+
     for i in range(0, totalTokens):
         token = tokens[i].replace('</w>', ' ')
         # alternate color
         s = (usedTokens % 6) + 1
         if i < model.cond_stage_model.max_length:
             tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
             usedTokens += 1
         else:  # over max token length
             discarded = discarded + f"\x1b[0;3{s};40m{token}"
-    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
+
+    if usedTokens > 0:
+        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f'{tokenized}\x1b[0m')
+
     if discarded != "":
-        print(
-            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
-        )
+        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
+        print(f'{discarded}\x1b[0m')
\ No newline at end of file

From bf4344be5137868a6ca04c4b0d52aaccb1dd49e7 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:40 +1300
Subject: [PATCH 4/5] Beautify Usage Stats Log

---
 ldm/generate.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/generate.py b/ldm/generate.py
index c421a52802e..002ba47a97b 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -574,7 +574,7 @@ def process_image(image,seed):
                 print('>> Could not generate image.')
 
         toc = time.time()
-        print('>> Usage stats:')
+        print('\n>> Usage stats:')
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                 toc - tic)

From 9c8fcaaf864a4ca00b875150dbb14379a5099a71 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:57 +1300
Subject: [PATCH 5/5] Beautify & Cleanup WebUI Logs

---
 invokeai/backend/invoke_ai_web_server.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py
index 6ec8098f593..9dd18ebe65d 100644
--- a/invokeai/backend/invoke_ai_web_server.py
+++ b/invokeai/backend/invoke_ai_web_server.py
@@ -626,9 +626,10 @@ def handle_generate_image_event(
                     printable_parameters["init_mask"][:64] + "..."
                 )
 
-            print(
-                f">> Image generation requested: {printable_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}"
-            )
+            print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
+            print(f'>> ESRGAN Parameters: {esrgan_parameters}')
+            print(f'>> Facetool Parameters: {facetool_parameters}')
+
             self.generate_images(
                 generation_parameters,
                 esrgan_parameters,
@@ -1154,7 +1155,7 @@ def image_done(image, seed, first_seed, attention_maps_image=None):
                     image, os.path.basename(path), self.thumbnail_image_path
                 )
 
-                print(f'>> Image generated: "{path}"')
+                print(f'\n\n>> Image generated: "{path}"\n')
                 self.write_log_message(f'[Generated] "{path}": {command}')
 
                 if progress.total_iterations > progress.current_iteration:
@@ -1193,8 +1194,6 @@ def image_done(image, seed, first_seed, attention_maps_image=None):
 
             progress.set_current_iteration(progress.current_iteration + 1)
 
-            print(generation_parameters)
-
             def diffusers_step_callback_adapter(*cb_args, **kwargs):
                 if isinstance(cb_args[0], PipelineIntermediateState):
                     progress_state: PipelineIntermediateState = cb_args[0]
@@ -1305,8 +1304,6 @@ def parameters_to_generated_image_metadata(self, parameters):
 
         rfc_dict["variations"] = variations
 
-        print(parameters)
-
         if rfc_dict["type"] == "img2img":
             rfc_dict["strength"] = parameters["strength"]
             rfc_dict["fit"] = parameters["fit"]  # TODO: Noncompliant
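
A minimal usage sketch for the flag added in PATCH 1/5; the launch script name and the init-file location below are assumptions, not part of the patches:

    # show tokenization in the terminal while running the web UI
    python scripts/invoke.py --web --log_tokenization

    # or enable it persistently by adding a line to invokeai.init in the InvokeAI root
    --log_tokenization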