diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..312fe056d
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+.venv
+__pycache__/
+*.log
+*.git
+*.gitignore
+docker
+models/*
+embeddings/*
+extensions/*
diff --git a/README.md b/README.md
index a93079fd1..661977d73 100644
--- a/README.md
+++ b/README.md
@@ -104,6 +104,45 @@ Alternatively, use online services (like Google Colab):
 
 - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
 
+### Running with Docker
+
+#### a) Run with Docker Compose
+DISCLAIMER: This currently only works with NVIDIA GPUs.
+
+You need to have [Docker](https://www.docker.com/) installed on your system. Then clone this repository and run `docker compose -f docker/compose.yml up` from the root of the repository. The first run takes a long time because all dependencies are installed; subsequent runs start the webui almost instantly. To stop the webui, press CTRL+C and wait a few seconds.
+
+Models are provided to the Docker container using a bind mount. This means that if you add a new model to the models directory, it becomes available in the webui after a checkpoint refresh, without rebuilding or restarting the container.
+
+The server will be accessible at [localhost:7860](http://localhost:7860).
+
+#### b) Run with the Docker CLI
+```
+git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
+cd stable-diffusion-webui
+export TAG=$(git describe --abbrev=0) # the image tag; this example uses the latest git tag
+export IMG=automatic1111/stable-diffusion-webui:${TAG} # note: repository names must be lowercase
+docker build -t ${IMG} -f docker/Dockerfile .
+docker run --gpus all -d -p 7860:7860 -v $(pwd)/models:/webui/models -v $(pwd)/embeddings:/webui/embeddings ${IMG}
+# The -v flags bind-mount your local pre-downloaded model weights and embeddings into the container; extensions, textual_inversion_templates, localizations, etc. can be mounted in the same manner.
+```
+
+#### c) Run on Kubernetes
+
+Prerequisites:
+
+ - You already have a Kubernetes cluster in place and a kubeconfig for it on your machine.
+ - Build the Docker image as in step (b) above and load it into your cluster (or push it to a registry the cluster can pull from).
+ - Replace `YOUR-IMAGE-NAME` and `YOUR-LOCAL-PATH` in `docker/k8s-sd-webui.yaml`.
+
+```
+kubectl apply -f docker/k8s-sd-webui.yaml # create the Deployment and the NodePort Service
+kubectl get po -l app=stable-diffusion-webui # list the webui pod
+#kubectl wait --for=condition=available deploy/stable-diffusion-webui --timeout=600s # optionally wait until the pod is ready (CTRL+C to skip)
+kubectl get svc stable-diffusion-webui-service # show the assigned NodePort; access the webui at <node-ip>:<node-port>
+```
+
+To debug, check the logs with `kubectl logs -f deploy/stable-diffusion-webui`.
+
 ### Installation on Windows 10/11 with NVidia-GPUs using release package
 1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
 2. Run `update.bat`.
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 000000000..fb5431f08
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.10
+
+WORKDIR /webui
+
+# ffmpeg/libsm6/libxext6 are needed by opencv-python; google-perftools provides the tcmalloc library that webui.sh preloads
+RUN apt-get update && \
+    apt-get install -y ffmpeg libsm6 libxext6 dos2unix google-perftools
+
+COPY . .
+
+# Normalize line endings in case the repository was checked out with CRLF line endings (e.g. on Windows)
+RUN dos2unix ./webui.sh ./webui-user.sh
+
+# Run as an unprivileged user whose UID/GID match the typical owner of the bind-mounted host directories
+RUN groupadd --system --gid 1000 webui && \
+    useradd webui --uid 1000 --gid 1000 --create-home --shell /bin/bash && \
+    chown -R webui:webui .
+USER 1000:1000
+
+# Install the python dependencies at build time so that container start-up is fast
+RUN ./webui.sh --exit --skip-torch-cuda-test
+
+CMD [ "./webui.sh", "--skip-prepare-environment", "--listen" ]
diff --git a/docker/compose.yml b/docker/compose.yml
new file mode 100644
index 000000000..85bafbf41
--- /dev/null
+++ b/docker/compose.yml
@@ -0,0 +1,30 @@
+services:
+  webui:
+    build:
+      context: ..                  # build from the repository root so COPY . . picks up the webui sources
+      dockerfile: docker/Dockerfile
+    volumes:
+      - type: bind
+        source: ../models
+        target: /webui/models
+      - type: bind
+        source: ../outputs
+        target: /webui/outputs
+      - type: bind
+        source: ../extensions
+        target: /webui/extensions
+      - type: bind
+        source: ../embeddings
+        target: /webui/embeddings
+      - type: bind
+        source: ../configs
+        target: /webui/configs
+    ports:
+      - 7860:7860
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: all
+              capabilities: [gpu]
diff --git a/docker/k8s-sd-webui.yaml b/docker/k8s-sd-webui.yaml
new file mode 100644
index 000000000..fcf7e1cd6
--- /dev/null
+++ b/docker/k8s-sd-webui.yaml
@@ -0,0 +1,64 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: stable-diffusion-webui
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: stable-diffusion-webui
+  template:
+    metadata:
+      labels:
+        app: stable-diffusion-webui
+    spec:
+      containers:
+      - name: stable-diffusion-webui
+        image: $(YOUR-IMAGE-NAME) # the image name used when running `docker build`
+        ports:
+        - containerPort: 7860
+        volumeMounts:
+        - mountPath: /webui/models
+          name: models-volume
+        - mountPath: /webui/outputs
+          name: image-outputs
+        - mountPath: /webui/extensions
+          name: extensions-volume
+        - mountPath: /webui/embeddings
+          name: embeddings-volume
+        resources:
+          limits:
+            nvidia.com/gpu: 1 # adjust according to your needs
+        readinessProbe:
+          httpGet:
+            path: /
+            port: 7860
+          initialDelaySeconds: 120
+          periodSeconds: 30
+      volumes:
+      - name: models-volume
+        hostPath:
+          path: $(YOUR-LOCAL-PATH)/models # absolute path to the pre-downloaded models on the host machine
+      - name: image-outputs
+        hostPath:
+          path: $(YOUR-LOCAL-PATH)/outputs
+      - name: extensions-volume
+        hostPath:
+          path: $(YOUR-LOCAL-PATH)/extensions # absolute path to the extensions on the host machine
+      - name: embeddings-volume
+        hostPath:
+          path: $(YOUR-LOCAL-PATH)/embeddings # absolute path to the pre-downloaded embeddings on the host machine
+
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: stable-diffusion-webui-service
+spec:
+  type: NodePort # change this to LoadBalancer if needed
+  ports:
+  - port: 7860
+    targetPort: 7860
+  selector:
+    app: stable-diffusion-webui