parser.add_argument("--config",type=str,default=os.path.join(sd_path,"configs/stable-diffusion/v1-inference.yaml"),help="path to config which constructs model",)
parser.add_argument("--ckpt",type=str,default=sd_model_file,help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
# This should be deprecated, but we'll leave it for a few iterations
parser.add_argument("--ckpt-dir",type=str,default=None,help="Path to directory with stable diffusion checkpoints (Deprecated, use '--stablediffusion-models-path'",)
parser.add_argument("--no-progressbar-hiding",action='store_true',help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--medvram",action='store_true',help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram",action='store_true',help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond",action='store_true',help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--precision",type=str,help="evaluate at this precision",choices=["full","autocast"],default="autocast")
parser.add_argument("--share",action='store_true',help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--codeformer-models-path",type=str,help="Path to directory with codeformer model file(s).",default=os.path.join(model_path,'Codeformer'))
parser.add_argument("--gfpgan-models-path",type=str,help="Path to directory with GFPGAN model file(s).",default=os.path.join(model_path,'GFPGAN'))
parser.add_argument("--esrgan-models-path",type=str,help="Path to directory with ESRGAN model file(s).",default=os.path.join(model_path,'ESRGAN'))
parser.add_argument("--realesrgan-models-path",type=str,help="Path to directory with RealESRGAN model file(s).",default=os.path.join(model_path,'RealESRGAN'))
parser.add_argument("--stablediffusion-models-path",type=str,help="Path to directory with Stable-diffusion checkpoints.",default=os.path.join(model_path,'SwinIR'))
parser.add_argument("--swinir-models-path",type=str,help="Path to directory with SwinIR model file(s).",default=os.path.join(model_path,'SwinIR'))
parser.add_argument("--ldsr-models-path",type=str,help="Path to directory with LDSR model file(s).",default=os.path.join(model_path,'LDSR'))
parser.add_argument("--opt-split-attention",action='store_true',help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--opt-split-attention-v1",action='store_true',help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--port",type=int,help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available",default=None)
parser.add_argument("--gradio-auth",type=str,help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"',default=None)
parser.add_argument("--use-textbox-seed",action='store_true',help="use textbox for seeds in UI (no up/down, but possible to input long seeds)",default=False)
"ESRGAN_tile":OptionInfo(192,"Tile size for ESRGAN upscalers. 0 = no tiling.",gr.Slider,{"minimum":0,"maximum":512,"step":16}),
"ESRGAN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"realesrgan_enabled_models":OptionInfo(["Real-ESRGAN 4x plus","Real-ESRGAN 4x plus anime 6B"],"Select which RealESRGAN models to show in the web UI. (Requires restart)",gr.CheckboxGroup,lambda:{"choices":realesrgan_models_names()}),
"SWIN_tile":OptionInfo(192,"Tile size for all SwinIR.",gr.Slider,{"minimum":16,"maximum":512,"step":16}),
"SWIN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for SwinIR. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"img2img_fix_steps":OptionInfo(False,"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization":OptionInfo(False,"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis":OptionInfo(True,"Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds":OptionInfo(True,"Make K-diffusion samplers produce same images in a batch as when making a single image"),
"random_artist_categories":OptionInfo([],"Allowed categories for random artists selection when using the Roll button",gr.CheckboxGroup,{"choices":artist_db.categories()}),
"show_progress_every_n_steps":OptionInfo(0,"Show show image creation progress every N sampling steps. Set 0 to disable.",gr.Slider,{"minimum":0,"maximum":32,"step":1}),
"return_grid":OptionInfo(True,"Show grid in results for web"),
"add_model_hash_to_info":OptionInfo(True,"Add model hash to generation information"),
"font":OptionInfo("","Font for image grids that have text"),
"js_modal_lightbox":OptionInfo(True,"Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed":OptionInfo(True,"Show images zoomed in by default in full page image viewer"),
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})",file=sys.stderr)
bad_settings+=1
if bad_settings > 0:
    print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)