From 50700212d770a8a650a37983794c16368f16ab25 Mon Sep 17 00:00:00 2001 From: Lukas Date: Wed, 27 Nov 2024 16:24:43 +0100 Subject: [PATCH 1/2] Added support for Docker. Container can easily be started with docker compose --- .dockerignore | 2 ++ Dockerfile | 19 +++++++++++++++++++ README.md | 10 ++++++++++ compose.yml | 16 ++++++++++++++++ launch.py | 6 +++++- modules/cmd_args.py | 1 + webui-user.sh | 2 +- 7 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 .dockerignore create mode 100644 Dockerfile create mode 100644 compose.yml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..f456d7997 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +models +models/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..86fd6d62f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,19 @@ +FROM python:3.10-bookworm + +WORKDIR /webui + +RUN apt-get update && \ + apt-get install ffmpeg libsm6 libxext6 dos2unix google-perftools -y + +COPY . . + +RUN dos2unix ./webui.sh ./webui-user.sh + +RUN groupadd --system --gid 1000 webui && \ + useradd webui --uid 1000 --gid 1000 --create-home --shell /bin/bash && \ + chown -R webui:webui . +USER 1000:1000 + +RUN ./webui.sh --prepare-environment-only --skip-torch-cuda-test + +CMD [ "./webui.sh", "--skip-prepare-environment" ] diff --git a/README.md b/README.md index bc62945c0..f9beae23a 100644 --- a/README.md +++ b/README.md @@ -104,6 +104,16 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) +### Running with Docker + +DISCLAIMER: This currently only works with NVIDIA GPUs + +You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and execute `docker compose up` in the root of the repository. The first time you execute this command will take a long time as all the dependencies are installed. 
Subsequent runs of the command should start up the webui pretty much instantly. To stop the webui press CTRL+C and wait a few seconds. + +Models are provided to the Docker container using a bind mount. This means that if you add a new model to the models directory it should be available in the webui after a checkpoint refresh without needing to rebuild or restart the container. + +The server will be accessible at [localhost:7860](localhost:7860) + ### Installation on Windows 10/11 with NVidia-GPUs using release package 1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents. 2. Run `update.bat`. diff --git a/compose.yml b/compose.yml new file mode 100644 index 000000000..2088e357c --- /dev/null +++ b/compose.yml @@ -0,0 +1,16 @@ +services: + webui: + build: . + volumes: + - type: bind + source: ./models + target: /webui/models + ports: + - 7860:7860 + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] \ No newline at end of file diff --git a/launch.py b/launch.py index f83820d25..d0565cb98 100644 --- a/launch.py +++ b/launch.py @@ -34,6 +34,9 @@ def main(): launch_utils.startup_timer.record("initial startup") + if args.prepare_environment_only: + print("Setting up requirements wihout starting server as --setup-only flag was passed") + with launch_utils.startup_timer.subcategory("prepare environment"): if not args.skip_prepare_environment: prepare_environment() @@ -41,7 +44,8 @@ def main(): if args.test_server: configure_for_tests() - start() + if not args.prepare_environment_only: + start() if __name__ == "__main__": diff --git a/modules/cmd_args.py b/modules/cmd_args.py index d71982b2c..ad05231d4 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -126,3 +126,4 @@ parser.add_argument("--skip-load-model-at-start", action='store_true', help="if parser.add_argument("--unix-filenames-sanitization", 
action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system") parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system') parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file") +parser.add_argument("--prepare-environment-only", action='store_true', help="launch.py argument: only prepare environment without launching webui run with --skip-torch-cuda-test") diff --git a/webui-user.sh b/webui-user.sh index 70306c60d..3db30fcba 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -10,7 +10,7 @@ #clone_dir="stable-diffusion-webui" # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" -#export COMMANDLINE_ARGS="" +export COMMANDLINE_ARGS="--listen" # python3 executable #python_cmd="python3" From 50a66932087e1d728fefb139adb5b90fcf1ceeaa Mon Sep 17 00:00:00 2001 From: Peter Pan Date: Sun, 22 Dec 2024 01:03:22 -0500 Subject: [PATCH 2/2] update Dockerfile according to comments and move to separated folder Co-authored-by: Lukas Signed-off-by: Peter Pan --- .dockerignore | 11 ++++-- README.md | 31 +++++++++++++++- compose.yml | 16 --------- Dockerfile => docker/Dockerfile | 6 ++-- docker/compose.yml | 28 +++++++++++++++ docker/k8s-sd-webui.yaml | 64 +++++++++++++++++++++++++++++++++ launch.py | 6 +--- modules/cmd_args.py | 1 - webui-user.sh | 2 +- 9 files changed, 136 insertions(+), 29 deletions(-) delete mode 100644 compose.yml rename Dockerfile => docker/Dockerfile (69%) create mode 100644 docker/compose.yml create mode 100644 docker/k8s-sd-webui.yaml diff --git a/.dockerignore b/.dockerignore index f456d7997..312fe056d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,9 @@ -models -models/ \ No newline at 
end of file +.venv +__pycache__/ +*.log +*.git +*.gitignore +docker +models/* +embeddings/* +extensions/* diff --git a/README.md b/README.md index f9beae23a..d48e147da 100644 --- a/README.md +++ b/README.md @@ -106,14 +106,43 @@ Alternatively, use online services (like Google Colab): ### Running with Docker +#### a) Run with Docker Compose DISCLAIMER: This currently only works with NVIDIA GPUs -You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and execute `docker compose up` in the root of the repository. The first time you execute this command will take a long time as all the dependencies are installed. Subsequent runs of the command should start up the webui pretty much instantly. To stop the webui press CTRL+C and wait a few seconds. +You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and execute `docker compose -f docker/compose.yml up` in the root path of the repository. The first time you execute this command will take a long time as all the dependencies are installed. Subsequent runs of the command should start up the webui pretty much instantly. To stop the webui press CTRL+C and wait a few seconds. Models are provided to the Docker container using a bind mount. This means that if you add a new model to the models directory it should be available in the webui after a checkpoint refresh without needing to rebuild or restart the container. The server will be accessible at [localhost:7860](localhost:7860) +#### b) Run with docker CLI +``` +git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui +cd stable-diffusion-webui +export TAG=$(git describe --abbrev=0) # the image tag, here example uses the latest git tag +export IMG=automatic1111/stable-diffusion-webui:${TAG} # repository name must be lowercase +docker build -t ${IMG} -f docker/Dockerfile . 
+docker run --gpus all -d -p 7860:7860 -v $(pwd)/models/:/webui/models -v $(pwd)/embeddings:/webui/embeddings $IMG +# These `-v` flags bind-mount your local pre-downloaded model weights and embeddings into the container; you can do the same for extensions, textual_inversion_templates, localizations, etc. +``` + +#### c) Run on Kubernetes + +Prerequisites: + + - You already have a Kubernetes cluster in place and a kubeconfig on your machine. + - Build the docker image as in step (b) above, and load it into your K8S cluster. + - Modify the `YOUR-IMAGE-NAME` and `YOUR-LOCAL-PATH` in `docker/k8s-sd-webui.yaml` + +``` +kubectl apply -f docker/k8s-sd-webui.yaml # Create k8s workload and nodeport service +kubectl get po -l app=stable-diffusion-webui # List the container +#kubectl wait --for=condition=available endpoints/stable-diffusion-webui-service # wait for pod ready, you can CTRL+C to skip it
+kubectl get svc stable-diffusion-webui-service # To show the access NodePort port and access it thru K8S NodePort +``` + +To debug, you can check logs from `kubectl logs -f deploy/stable-diffusion-webui` + ### Installation on Windows 10/11 with NVidia-GPUs using release package 1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents. 2. Run `update.bat`. diff --git a/compose.yml b/compose.yml deleted file mode 100644 index 2088e357c..000000000 --- a/compose.yml +++ /dev/null @@ -1,16 +0,0 @@ -services: - webui: - build: . 
- volumes: - - type: bind - source: ./models - target: /webui/models - ports: - - 7860:7860 - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: all - capabilities: [gpu] \ No newline at end of file diff --git a/Dockerfile b/docker/Dockerfile similarity index 69% rename from Dockerfile rename to docker/Dockerfile index 86fd6d62f..fb5431f08 100644 --- a/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.10-bookworm +FROM python:3.10 WORKDIR /webui @@ -14,6 +14,6 @@ RUN groupadd --system --gid 1000 webui && \ chown -R webui:webui . USER 1000:1000 -RUN ./webui.sh --prepare-environment-only --skip-torch-cuda-test +RUN ./webui.sh --exit --skip-torch-cuda-test -CMD [ "./webui.sh", "--skip-prepare-environment" ] +CMD [ "./webui.sh", "--skip-prepare-environment", "--listen" ] diff --git a/docker/compose.yml b/docker/compose.yml new file mode 100644 index 000000000..85bafbf41 --- /dev/null +++ b/docker/compose.yml @@ -0,0 +1,28 @@ +services: + webui: + build: { context: "..", dockerfile: "docker/Dockerfile" } 
+ volumes: + - type: bind + source: ../models + target: /webui/models + - type: bind + source: ../outputs + target: /webui/outputs + - type: bind + source: ../extensions + target: /webui/extensions + - type: bind + source: ../embeddings + target: /webui/embeddings + - type: bind + source: ../configs + target: /webui/configs + ports: + - 7860:7860 + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] diff --git a/docker/k8s-sd-webui.yaml b/docker/k8s-sd-webui.yaml new file mode 100644 index 000000000..fcf7e1cd6 --- /dev/null +++ b/docker/k8s-sd-webui.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: stable-diffusion-webui +spec: + replicas: 1 + selector: + matchLabels: + app: stable-diffusion-webui + template: + metadata: + labels: + app: stable-diffusion-webui + spec: + containers: + - name: stable-diffusion-webui + image: $(YOUR-IMAGE-NAME) # the image name specified when doing `docker build` + ports: + - containerPort: 7860 + volumeMounts: + - mountPath: /webui/models + name: models-volume + - mountPath: /webui/outputs + name: image-outputs + - mountPath: /webui/extensions + name: extensions-volume + - mountPath: /webui/embeddings + name: embeddings-volume + resources: + limits: + nvidia.com/gpu: 1 # Adjust according to your needs + readinessProbe: + httpGet: + path: / + port: 7860 + initialDelaySeconds: 120 + periodSeconds: 30 + volumes: + - name: models-volume + hostPath: + path: $(YOUR-LOCAL-PATH)/models # absolute path of pre-download model on the host machine + - name: image-outputs + hostPath: + path: $(YOUR-LOCAL-PATH)/outputs + - name: extensions-volume + hostPath: + path: $(YOUR-LOCAL-PATH)/extensions # absolute path of extensions + - name: embeddings-volume + hostPath: + path: $(YOUR-LOCAL-PATH)/embeddings # absolute path of pre-download embeddings + + +--- +apiVersion: v1 +kind: Service +metadata: + name: stable-diffusion-webui-service +spec: + type: NodePort # You can change 
this to LoadBalancer if needed + ports: + - port: 7860 + targetPort: 7860 + selector: + app: stable-diffusion-webui diff --git a/launch.py b/launch.py index d0565cb98..f83820d25 100644 --- a/launch.py +++ b/launch.py @@ -34,9 +34,6 @@ def main(): launch_utils.startup_timer.record("initial startup") - if args.prepare_environment_only: - print("Setting up requirements wihout starting server as --setup-only flag was passed") - with launch_utils.startup_timer.subcategory("prepare environment"): if not args.skip_prepare_environment: prepare_environment() @@ -44,8 +41,7 @@ def main(): if args.test_server: configure_for_tests() - if not args.prepare_environment_only: - start() + start() if __name__ == "__main__": diff --git a/modules/cmd_args.py b/modules/cmd_args.py index ad05231d4..d71982b2c 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -126,4 +126,3 @@ parser.add_argument("--skip-load-model-at-start", action='store_true', help="if parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system") parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. 
If you override it, it can conflict with your file system') parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file") -parser.add_argument("--prepare-environment-only", action='store_true', help="launch.py argument: only prepare environment without launching webui run with --skip-torch-cuda-test") diff --git a/webui-user.sh b/webui-user.sh index 3db30fcba..70306c60d 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -10,7 +10,7 @@ #clone_dir="stable-diffusion-webui" # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" -export COMMANDLINE_ARGS="--listen" +#export COMMANDLINE_ARGS="" # python3 executable #python_cmd="python3"