update Dockerfile according to comments and move to separated folder

Co-authored-by: Lukas <lkoe@bluewin.ch>
Signed-off-by: Peter Pan <Peter.Pan@daocloud.io>
This commit is contained in:
Peter Pan 2024-12-22 01:03:22 -05:00
parent 50700212d7
commit 50a6693208
9 changed files with 136 additions and 29 deletions

View file

@ -1,2 +1,9 @@
models
models/
.venv
__pycache__/
*.log
*.git
*.gitignore
docker
models/*
embeddings/*
extensions/*

View file

@ -106,14 +106,43 @@ Alternatively, use online services (like Google Colab):
### Running with Docker
#### a) Run with Docker Compose
DISCLAIMER: This currently only works with NVIDIA GPUs
You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and execute `docker compose up` in the root of the repository. The first time you execute this command will take a long time as all the dependencies are installed. Subsequent runs of the command should start up the webui pretty much instantly. To stop the webui press CTRL+C and wait a few seconds.
You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and execute `docker compose -f docker/compose.yml up` in the root path of the repository. The first time you execute this command will take a long time as all the dependencies are installed. Subsequent runs of the command should start up the webui pretty much instantly. To stop the webui press CTRL+C and wait a few seconds.
Models are provided to the Docker container using a bind mount. This means that if you add a new model to the models directory it should be available in the webui after a checkpoint refresh without needing to rebuild or restart the container.
The server will be accessible at [localhost:7860](http://localhost:7860)
#### b) Run with docker CLI
```
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
cd stable-diffusion-webui
export TAG=$(git describe --abbrev=0) # the image tag, here example uses the latest git tag
export IMG=AUTOMATIC1111/stable-diffusion-webui:${TAG}
docker build -t ${IMG} -f docker/Dockerfile .
docker run --gpus all -d -p 7860:7860 -v $(pwd)/models/:/webui/models -v $(pwd)/embeddings:/webui/embeddings $IMG
# These `-v` flags bind-mount your local pre-downloaded model weights and embeddings into the container; you can mount extensions (and textual_inversion_templates, localizations, etc.) in the same manner.
```
#### c) Run on Kubernetes
Prerequisite:
- You already have a Kubernetes cluster in place and a kubeconfig file on your machine.
- Build the Docker image as in step (b) above, and load it into your K8s cluster.
- Modify the `YOUR-IMAGE-NAME` and `YOUR-LOCAL-PATH` in `docker/k8s-sd-webui.yaml`
```
kubectl apply -f docker/k8s-sd-webui.yaml # Create k8s workload and nodeport service
kubectl get po -l app=stable-diffusion-webui # List the container
#kubectl wait --for=condition=available endpoints/stable-diffusion-webui-service # wait for pod ready, you can CTRL+C to skip it
kubectl get svc stable-diffusion-webui-service # Show the assigned NodePort so you can access the webui through it
```
To debug, you can check logs from `kubectl logs -f deploy/stable-diffusion-webui`
### Installation on Windows 10/11 with NVidia-GPUs using release package
1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
2. Run `update.bat`.

View file

@ -1,16 +0,0 @@
services:
  webui:
    build: .
    volumes:
      - type: bind
        source: ./models
        target: /webui/models
    ports:
      # Quoted per Compose best practice — unquoted colon-separated values
      # can hit YAML 1.1's sexagesimal-integer trap.
      - "7860:7860"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

View file

@ -1,4 +1,4 @@
FROM python:3.10-bookworm
FROM python:3.10
WORKDIR /webui
@ -14,6 +14,6 @@ RUN groupadd --system --gid 1000 webui && \
chown -R webui:webui .
USER 1000:1000
RUN ./webui.sh --prepare-environment-only --skip-torch-cuda-test
RUN ./webui.sh --exit --skip-torch-cuda-test
CMD [ "./webui.sh", "--skip-prepare-environment" ]
CMD [ "./webui.sh", "--skip-prepare-environment", "--listen" ]

28
docker/compose.yml Normal file
View file

@ -0,0 +1,28 @@
services:
  webui:
    # NOTE(review): Compose resolves relative paths against this file's
    # directory (docker/) — confirm `build: .` and the ./ bind sources are
    # meant to resolve from the repo root when run via
    # `docker compose -f docker/compose.yml up`.
    build: .
    volumes:
      - type: bind
        source: ./models
        target: /webui/models
      - type: bind
        source: ./outputs
        target: /webui/outputs
      - type: bind
        source: ./extensions
        target: /webui/extensions
      - type: bind
        source: ./embeddings
        target: /webui/embeddings
      - type: bind
        source: ./configs
        target: /webui/configs
    ports:
      # Quoted per Compose best practice — unquoted colon-separated values
      # can hit YAML 1.1's sexagesimal-integer trap.
      - "7860:7860"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

64
docker/k8s-sd-webui.yaml Normal file
View file

@ -0,0 +1,64 @@
# Deployment running the webui container plus a NodePort Service exposing it.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stable-diffusion-webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: stable-diffusion-webui
  template:
    metadata:
      labels:
        app: stable-diffusion-webui
    spec:
      containers:
        - name: stable-diffusion-webui
          image: $(YOUR-IMAGE-NAME)  # the image name specified when doing `docker build`
          ports:
            - containerPort: 7860
          volumeMounts:
            - mountPath: /webui/models
              name: models-volume
            - mountPath: /webui/outputs
              name: image-outputs
            - mountPath: /webui/extensions
              name: extensions-volume
            - mountPath: /webui/embeddings
              name: embeddings-volume
          resources:
            limits:
              nvidia.com/gpu: 1  # Adjust according to your needs
          readinessProbe:
            httpGet:
              path: /
              port: 7860
            initialDelaySeconds: 120  # first start installs dependencies, so allow a long warm-up
            periodSeconds: 30
      volumes:
        - name: models-volume
          hostPath:
            path: $(YOUR-LOCAL-PATH)/models  # absolute path of pre-downloaded models on the host machine
        - name: image-outputs
          hostPath:
            path: $(YOUR-LOCAL-PATH)/outputs
        - name: extensions-volume
          hostPath:
            path: $(YOUR-LOCAL-PATH)/extensions  # absolute path of extensions
        - name: embeddings-volume
          hostPath:
            path: $(YOUR-LOCAL-PATH)/embeddings  # absolute path of pre-downloaded embeddings
---
apiVersion: v1
kind: Service
metadata:
  name: stable-diffusion-webui-service
spec:
  type: NodePort  # You can change this to LoadBalancer if needed
  ports:
    - port: 7860
      targetPort: 7860
  selector:
    app: stable-diffusion-webui

View file

@ -34,9 +34,6 @@ def main():
launch_utils.startup_timer.record("initial startup")
if args.prepare_environment_only:
print("Setting up requirements without starting server as --prepare-environment-only flag was passed")
with launch_utils.startup_timer.subcategory("prepare environment"):
if not args.skip_prepare_environment:
prepare_environment()
@ -44,8 +41,7 @@ def main():
if args.test_server:
configure_for_tests()
if not args.prepare_environment_only:
start()
start()
if __name__ == "__main__":

View file

@ -126,4 +126,3 @@ parser.add_argument("--skip-load-model-at-start", action='store_true', help="if
parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system")
parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system')
parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; setting this argument will not create '--data_path/params.txt' file")
parser.add_argument("--prepare-environment-only", action='store_true', help="launch.py argument: only prepare environment without launching webui run with --skip-torch-cuda-test")

View file

@ -10,7 +10,7 @@
#clone_dir="stable-diffusion-webui"
# Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
export COMMANDLINE_ARGS="--listen"
#export COMMANDLINE_ARGS=""
# python3 executable
#python_cmd="python3"