diff --git a/ldm/generate.py b/ldm/generate.py
index 7da9848a5fb..48c5a693ca1 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -22,6 +22,7 @@
 from diffusers.pipeline_utils import DiffusionPipeline
 from diffusers.utils.import_utils import is_xformers_available
 from omegaconf import OmegaConf
+from pathlib import Path
 from PIL import Image, ImageOps
 from pytorch_lightning import logging, seed_everything
@@ -991,8 +992,17 @@ def set_model(self, model_name):
         self.model_name = model_name
         self._set_sampler() # requires self.model_name to be set first
+        self._save_last_used_model(model_name)
         return self.model
 
+    def _save_last_used_model(self,model_name:str):
+        """
+        Save name of the last model used.
+        """
+        model_file_path = Path(Globals.root,'.last_model')
+        with open(model_file_path,'w') as f:
+            f.write(model_name)
+
     def load_huggingface_concepts(self, concepts: list[str]):
         self.model.textual_inversion_manager.load_huggingface_concepts(concepts)
diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index f1e10938baa..001c7e65309 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -181,7 +181,6 @@ def main():
     # web server loops forever
     if opt.web or opt.gui:
         invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan)
-        save_last_used_model(gen.model_name)
         sys.exit(0)
 
     if not infile:
@@ -502,7 +501,6 @@ def image_writer(
     print(
         f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'
     )
-    save_last_used_model(gen.model_name)
 
 
 # TO DO: remove repetitive code and the awkward command.replace() trope
@@ -1300,14 +1298,6 @@ def retrieve_last_used_model()->str:
     with open(model_file_path,'r') as f:
         return f.readline()
 
-def save_last_used_model(model_name:str):
-    """
-    Save name of the last model used.
-    """
-    model_file_path = Path(Globals.root,'.last_model')
-    with open(model_file_path,'w') as f:
-        f.write(model_name)
-
 # This routine performs any patch-ups needed after installation
 def run_patches():
     install_missing_config_files()
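
Taken together, the hunks above move the `.last_model` bookkeeping out of CLI.py and into the `Generate` class: `set_model()` now persists the selected model name via the new private `_save_last_used_model()` helper, while `retrieve_last_used_model()` in CLI.py continues to read it back. The sketch below is a minimal, standalone illustration of that round trip, not the actual InvokeAI code: the `root` parameter stands in for `Globals.root`, and the `tempfile` directory is a hypothetical stand-in for the real runtime directory.

```python
# Minimal sketch of the .last_model round trip implemented by this diff.
# `root` stands in for Globals.root; the tempfile directory is a
# hypothetical stand-in for the InvokeAI runtime directory.
import tempfile
from pathlib import Path


def save_last_used_model(root: str, model_name: str) -> None:
    """Write the name of the last model used to <root>/.last_model."""
    model_file_path = Path(root, ".last_model")
    with open(model_file_path, "w") as f:
        f.write(model_name)


def retrieve_last_used_model(root: str) -> str:
    """Read back the model name saved by save_last_used_model()."""
    model_file_path = Path(root, ".last_model")
    with open(model_file_path, "r") as f:
        return f.readline()


if __name__ == "__main__":
    root = tempfile.mkdtemp()                        # stand-in for Globals.root
    save_last_used_model(root, "stable-diffusion-1.5")
    print(retrieve_last_used_model(root))            # -> stable-diffusion-1.5
```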