-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathloadModel.py
More file actions
89 lines (76 loc) · 2.69 KB
/
loadModel.py
File metadata and controls
89 lines (76 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import torch
import os
from diffusers import pipelines as _pipelines, StableDiffusionPipeline
from .getScheduler import getScheduler, DEFAULT_SCHEDULER
from .precision import torch_dtype_from_precision
from .device import device
import time
# --- Environment-driven configuration for model loading ---

# Hugging Face auth token, needed for private/gated models (may be None).
HF_AUTH_TOKEN = os.getenv("HF_AUTH_TOKEN")
# Pipeline selector (currently unused; see commented-out code in loadModel).
PIPELINE = os.getenv("PIPELINE")
# Enabled only when the env var is exactly the string "1".
USE_DREAMBOOTH = os.getenv("USE_DREAMBOOTH") == "1"
HOME = os.path.expanduser("~")
# MODELS_DIR = os.path.join(HOME, ".cache", "huggingface", "hub")
MODELS_DIR = "./models/"
MODEL_IDS = [
    # "CompVis/stable-diffusion-v1-4",
    # "hakurei/waifu-diffusion",
    # # "hakurei/waifu-diffusion-v1-3", - not as diffusers yet
    # "runwayml/stable-diffusion-inpainting",
    # "runwayml/stable-diffusion-v1-5",
    # "stabilityai/stable-diffusion-2"
    # "stabilityai/stable-diffusion-2-base"
    # "stabilityai/stable-diffusion-2-inpainting",
    "Hius/DreamFul-V2"
]
def loadModel(model_id: str, load=True, precision=None, revision=None, send_opts=None):
    """Load (or only download) a StableDiffusionPipeline for ``model_id``.

    Args:
        model_id: Hugging Face model id, e.g. "Hius/DreamFul-V2".
        load: When True, load weights from local files and move the model
            to the target device; when False, only download the weights.
        precision: Currently ignored -- fp16 is hard-coded below (the
            torch_dtype_from_precision call is deliberately commented out).
        revision: Optional model revision; "" is normalized to None.
        send_opts: Unused options dict, kept for interface compatibility.

    Returns:
        The loaded pipeline when ``load`` is True, otherwise None.
    """
    # Avoid the shared-mutable-default pitfall; callers may still omit it.
    if send_opts is None:
        send_opts = {}
    # NOTE(review): precision is ignored and fp16 forced; original
    # precision-aware line preserved in case it should be restored.
    # torch_dtype = torch_dtype_from_precision(precision)
    torch_dtype = torch.float16
    if revision == "":
        revision = None
    print(
        "loadModel",
        {
            "model_id": model_id,
            "load": load,
            "precision": precision,
            "revision": revision,
        },
    )
    print(
        ("Loading" if load else "Downloading")
        + " model: "
        + model_id
        + (f" ({revision})" if revision else "")
    )
    pipeline = StableDiffusionPipeline
    # pipeline = (
    #     StableDiffusionPipeline if PIPELINE == "ALL" else getattr(_pipelines, PIPELINE)
    # )
    scheduler = getScheduler(model_id, DEFAULT_SCHEDULER, not load)
    # Prefer a local checkout under MODELS_DIR; fall back to the hub id.
    model_dir = os.path.join(MODELS_DIR, model_id)
    if not os.path.isdir(model_dir):
        model_dir = None
    from_pretrained = time.time()
    model = pipeline.from_pretrained(
        model_dir or model_id,
        revision=revision,
        torch_dtype=torch_dtype,
        use_auth_token=HF_AUTH_TOKEN,
        scheduler=scheduler,
        # When loading (not downloading) the files must already be local.
        local_files_only=load,
        # Work around https://github.com/huggingface/diffusers/issues/1246
        # low_cpu_mem_usage=False if USE_DREAMBOOTH else True,
    )
    # model.enable_sequential_cpu_offload()
    from_pretrained = round((time.time() - from_pretrained) * 1000)
    if load:
        to_gpu = time.time()
        model.to(device)
        if device == "cpu":
            # Offload submodules sequentially to cap peak RAM on CPU-only hosts.
            model.enable_sequential_cpu_offload()
        to_gpu = round((time.time() - to_gpu) * 1000)
        print(f"Loaded from disk in {from_pretrained} ms, to gpu in {to_gpu} ms")
    else:
        print(f"Downloaded in {from_pretrained} ms")
    return model if load else None