From cfe67b6d96f96fa378f44a8105db753f053dfa36 Mon Sep 17 00:00:00 2001 From: zuencap <37028435+zuencap@users.noreply.github.com> Date: Tue, 20 Dec 2022 14:40:42 +0100 Subject: [PATCH 1/4] Enable user switch between checkpoint copies Set ckpt-dir so that user can switch between checkpoint copies using webui --- fast_DreamBooth.ipynb | 1665 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1665 insertions(+) create mode 100644 fast_DreamBooth.ipynb diff --git a/fast_DreamBooth.ipynb b/fast_DreamBooth.ipynb new file mode 100644 index 00000000..06008218 --- /dev/null +++ b/fast_DreamBooth.ipynb @@ -0,0 +1,1665 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "qEsNHTtVlbkV" + }, + "source": [ + "# **fast-DreamBooth colab From https://github.com/TheLastBen/fast-stable-diffusion, if you face any issues, feel free to discuss them.**\n", + "Keep your notebook updated for best experience. [Support](https://ko-fi.com/thelastben)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "A4Bae3VP6UsE", + "outputId": "c0347d29-b411-4a46-8ceb-b7ca0081c71a", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Mounted at /content/gdrive\n" + ] + } + ], + "source": [ + "from google.colab import drive\n", + "drive.mount('/content/gdrive')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "cellView": "form", + "id": "QyvcqeiL65Tj", + "outputId": "61b76d84-6fdf-4da1-eeaa-7ca38365dd5a", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;32mDONE !\n" + ] + } + ], + "source": [ + "#@markdown # Dependencies\n", + "\n", + "from IPython.utils import capture\n", + "import time\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd /content/\n", + " !pip install -q accelerate==0.12.0\n", + " for i in range(1,6):\n", + " !wget -q \"https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dependencies/Dependencies.{i}\"\n", + " !mv \"Dependencies.{i}\" \"Dependencies.7z.00{i}\"\n", + " !7z x -y Dependencies.7z.001\n", + " time.sleep(2)\n", + " !cp -r /content/usr/local/lib/python3.8/dist-packages /usr/local/lib/python3.8/\n", + " !rm -r /content/usr\n", + " for i in range(1,6):\n", + " !rm \"Dependencies.7z.00{i}\"\n", + " !pip uninstall -y diffusers\n", + " !git clone --branch updt https://github.com/TheLastBen/diffusers\n", + " !pip install -q /content/diffusers\n", + "print('\u001b[1;32mDONE !')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R3SsbIlxw66N" + }, + "source": [ + "# Model Download" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "cellView": "form", + "id": "O3KHGKqyeJp9", + "outputId": "7a02fbf9-ff65-4c2a-9f7c-57c82f748893", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;32mDONE !\n" + ] + } + ], + "source": [ + "import os\n", + "import time\n", + "from IPython.display import clear_output\n", + "import wget\n", + "\n", + "#@markdown - Skip this cell if you are loading a previous session\n", + "\n", + "#@markdown ---\n", + "\n", + "Model_Version = \"V2.1-512px\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", + "\n", + "#@markdown - Choose which version to finetune.\n", + "\n", + "#@markdown ---\n", + "\n", + "with capture.capture_output() as cap:\n", + " 
%cd /content/\n", + "\n", + "Huggingface_Token = \"\" #@param {type:\"string\"}\n", + "token=Huggingface_Token\n", + "\n", + "#@markdown - Leave EMPTY if you're using the v2 model.\n", + "#@markdown - Make sure you've accepted the terms in https://huggingface.co/runwayml/stable-diffusion-v1-5\n", + "\n", + "#@markdown ---\n", + "Custom_Model_Version=\"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", + "#@markdown - Choose wisely!\n", + "\n", + "Path_to_HuggingFace= \"\" #@param {type:\"string\"}\n", + "\n", + "\n", + "#@markdown - Load and finetune a model from Hugging Face, must specify if v2, use the format \"profile/model\" like : runwayml/stable-diffusion-v1-5\n", + "\n", + "#@markdown Or\n", + "\n", + "CKPT_Path = \"\" #@param {type:\"string\"}\n", + "\n", + "#@markdown Or\n", + "\n", + "CKPT_Link = \"\" #@param {type:\"string\"}\n", + "\n", + "#@markdown - A CKPT direct link, huggingface CKPT link or a shared CKPT from gdrive.\n", + "#@markdown ---\n", + "\n", + "def downloadmodel():\n", + " token=Huggingface_Token\n", + " if token==\"\":\n", + " token=input(\"Insert your huggingface token :\")\n", + " if os.path.exists('/content/stable-diffusion-v1-5'):\n", + " !rm -r /content/stable-diffusion-v1-5\n", + " clear_output()\n", + "\n", + " %cd /content/\n", + " clear_output()\n", + " !mkdir /content/stable-diffusion-v1-5\n", + " %cd /content/stable-diffusion-v1-5\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{token}@huggingface.co/runwayml/stable-diffusion-v1-5\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " if os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n", + " !git clone \"https://USER:{token}@huggingface.co/stabilityai/sd-vae-ft-mse\"\n", + " !mv /content/stable-diffusion-v1-5/sd-vae-ft-mse /content/stable-diffusion-v1-5/vae\n", + " !rm -r /content/stable-diffusion-v1-5/.git\n", + " %cd /content/stable-diffusion-v1-5\n", + " !rm model_index.json\n", + " time.sleep(1)\n", + " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", + " !sed -i 's@\"clip_sample\": false@@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", + " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", + " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json\n", + " %cd /content/\n", + " clear_output()\n", + " print('\u001b[1;32mDONE !')\n", + " else:\n", + " while not os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mMake sure you accepted the terms in https://huggingface.co/runwayml/stable-diffusion-v1-5')\n", + " time.sleep(5)\n", + "\n", + "def newdownloadmodel():\n", + "\n", + " %cd /content/\n", + " clear_output()\n", + " !mkdir /content/stable-diffusion-v2-768\n", + " %cd /content/stable-diffusion-v2-768\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{token}@huggingface.co/stabilityai/stable-diffusion-2-1\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " 
clear_output()\n", + " print('\u001b[1;32mDONE !')\n", + "\n", + "\n", + "def newdownloadmodelb():\n", + "\n", + " %cd /content/\n", + " clear_output()\n", + " !mkdir /content/stable-diffusion-v2-512\n", + " %cd /content/stable-diffusion-v2-512\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{token}@huggingface.co/stabilityai/stable-diffusion-2-1-base\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " clear_output()\n", + " print('\u001b[1;32mDONE !')\n", + "\n", + "\n", + "if Path_to_HuggingFace != \"\":\n", + " if Custom_Model_Version=='V2.1-512px' or Custom_Model_Version=='V2.1-768px':\n", + " if os.path.exists('/content/stable-diffusion-custom'):\n", + " !rm -r /content/stable-diffusion-custom\n", + " clear_output()\n", + " %cd /content/\n", + " clear_output()\n", + " !mkdir /content/stable-diffusion-custom\n", + " %cd /content/stable-diffusion-custom\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{token}@huggingface.co/{Path_to_HuggingFace}\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " !rm -r /content/stable-diffusion-custom/.git\n", + " %cd /content/\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", + " clear_output()\n", + " print('\u001b[1;32mDONE !')\n", + " else:\n", + " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mCheck the link you provided')\n", + " time.sleep(5)\n", + " else:\n", + " if os.path.exists('/content/stable-diffusion-custom'):\n", + " !rm -r /content/stable-diffusion-custom\n", + " clear_output()\n", + " %cd /content/\n", + " clear_output()\n", + " !mkdir /content/stable-diffusion-custom\n", + " %cd /content/stable-diffusion-custom\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{token}@huggingface.co/{Path_to_HuggingFace}\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " !git clone \"https://USER:{token}@huggingface.co/stabilityai/sd-vae-ft-mse\"\n", + " !mv /content/stable-diffusion-custom/sd-vae-ft-mse /content/stable-diffusion-custom/vae\n", + " !rm -r /content/stable-diffusion-custom/.git\n", + " %cd /content/stable-diffusion-custom\n", + " !rm model_index.json\n", + " time.sleep(1)\n", + " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", + " !sed -i 's@\"clip_sample\": false,@@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", + " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", + " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-custom/vae/config.json\n", + " %cd /content/\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", + " 
clear_output()\n", + " print('\u001b[1;32mDONE !')\n", + " else:\n", + " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mCheck the link you provided')\n", + " time.sleep(5)\n", + "\n", + "elif CKPT_Path !=\"\":\n", + " %cd /content\n", + " clear_output()\n", + " if os.path.exists(str(CKPT_Path)):\n", + " if Custom_Model_Version=='1.5':\n", + " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", + " !unzip -o -q refmdlz\n", + " !rm -f refmdlz\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", + " clear_output()\n", + " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v1\n", + " !rm -r /content/refmdl\n", + " elif Custom_Model_Version=='V2.1-512px':\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", + " elif Custom_Model_Version=='V2.1-768px':\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n", + " !rm /content/convertodiff.py\n", + " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " clear_output()\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", + " print('\u001b[1;32mDONE !')\n", + " else:\n", + " !rm -r /content/stable-diffusion-custom\n", + " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mConversion error')\n", + " time.sleep(5)\n", + " else:\n", + " while not os.path.exists(str(CKPT_Path)):\n", + " print('\u001b[1;31mWrong path, use the colab file explorer to copy the path')\n", + " time.sleep(5)\n", + "\n", + "elif CKPT_Link !=\"\":\n", + " %cd /content\n", + " clear_output()\n", + " !gdown --fuzzy -O model.ckpt $CKPT_Link\n", + " clear_output()\n", + " if os.path.exists('/content/model.ckpt'):\n", + " if os.path.getsize(\"/content/model.ckpt\") > 1810671599:\n", + " if Custom_Model_Version=='1.5':\n", + " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", + " !unzip -o -q refmdlz\n", + " !rm -f refmdlz\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", + " clear_output()\n", + " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v1\n", + " !rm -r /content/refmdl\n", + " elif Custom_Model_Version=='V2.1-512px':\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", + " elif Custom_Model_Version=='V2.1-768px':\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " !python 
/content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n", + " !rm /content/convertodiff.py\n", + " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " clear_output()\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", + " print('\u001b[1;32mDONE !')\n", + " else:\n", + " !rm -r /content/stable-diffusion-custom\n", + " !rm /content/model.ckpt\n", + " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mConversion error')\n", + " time.sleep(5)\n", + " else:\n", + " while os.path.getsize('/content/model.ckpt') < 1810671599:\n", + " print('\u001b[1;31mWrong link, check that the link is valid')\n", + " time.sleep(5)\n", + "\n", + "else:\n", + " if Model_Version==\"1.5\":\n", + " if not os.path.exists('/content/stable-diffusion-v1-5'):\n", + " downloadmodel()\n", + " MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n", + " else:\n", + " MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n", + " print(\"\u001b[1;32mThe v1.5 model already exists, using this model.\")\n", + " elif Model_Version==\"V2.1-512px\":\n", + " if not os.path.exists('/content/stable-diffusion-v2-512'):\n", + " newdownloadmodelb()\n", + " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", + " else:\n", + " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", + " print(\"\u001b[1;32mThe v2-512px model already exists, using this model.\")\n", + " elif Model_Version==\"V2.1-768px\":\n", + " if not os.path.exists('/content/stable-diffusion-v2-768'):\n", + " newdownloadmodel()\n", + " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", + " else:\n", + " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", + " print(\"\u001b[1;32mThe v2-768px model already exists, using this model.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0tN76Cj5P3RL" + }, + "source": [ + "# Dreambooth" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "cellView": "form", + "id": "A1B299g-_VJo", + "outputId": "8e667545-16d6-488a-c737-2bd3f7f20111", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;32mCreating session...\n", + "\u001b[1;32mSession created, proceed to uploading instance images\n" + ] + } + ], + "source": [ + "import os\n", + "from IPython.display import clear_output\n", + "from IPython.utils import capture\n", + "from os import listdir\n", + "from os.path import isfile\n", + "import wget\n", + "import time\n", + "\n", + "#@markdown #Create/Load a Session\n", + "\n", + "try:\n", + " MODEL_NAME\n", + " pass\n", + "except:\n", + " MODEL_NAME=\"\"\n", + "\n", + "PT=\"\"\n", + "\n", + "Session_Name = \"elzczm\" #@param{type: 'string'}\n", + "while Session_Name==\"\":\n", + " print('\u001b[1;31mInput the Session Name:')\n", + " Session_Name=input('')\n", + "Session_Name=Session_Name.replace(\" \",\"_\")\n", + "\n", + "#@markdown - Enter the session name; if it exists, it will be loaded, otherwise a new session will be created.\n", + "\n", + "Session_Link_optional = \"\" #@param{type: 'string'}\n", + "\n", + "#@markdown - Import a session from another gdrive; the shared gdrive link must point to the specific session's folder that contains the trained CKPT. Remove any intermediary CKPTs beforehand.\n", + "\n", + "WORKSPACE='/content/gdrive/MyDrive/Fast-Dreambooth'\n", + "\n", + "if Session_Link_optional !=\"\":\n", + "
print('\u001b[1;32mDownloading session...')\n", + "with capture.capture_output() as cap:\n", + " %cd /content\n", + " if Session_Link_optional != \"\":\n", + " if not os.path.exists(str(WORKSPACE+'/Sessions')):\n", + " %mkdir -p $WORKSPACE'/Sessions'\n", + " time.sleep(1)\n", + " %cd $WORKSPACE'/Sessions'\n", + " !gdown --folder --remaining-ok -O $Session_Name $Session_Link_optional\n", + " %cd $Session_Name\n", + " !rm -r instance_images\n", + " !unzip instance_images.zip\n", + " !rm -r concept_images\n", + " !unzip concept_images.zip\n", + " %cd /content\n", + "\n", + "\n", + "INSTANCE_NAME=Session_Name\n", + "OUTPUT_DIR=\"/content/models/\"+Session_Name\n", + "SESSION_DIR=WORKSPACE+'/Sessions/'+Session_Name\n", + "INSTANCE_DIR=SESSION_DIR+'/instance_images'\n", + "CONCEPT_DIR=SESSION_DIR+'/concept_images'\n", + "MDLPTH=str(SESSION_DIR+\"/\"+Session_Name+'.ckpt')\n", + "\n", + "Model_Version = \"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", + "#@markdown - Ignore this if you're not loading a previous session that contains a trained model\n", + "\n", + "\n", + "if os.path.exists(str(SESSION_DIR)):\n", + " mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(\".\")[-1]==\"ckpt\"]\n", + " if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):\n", + "\n", + " def f(n):\n", + " k=0\n", + " for i in mdls:\n", + " if k==n:\n", + " !mv \"$SESSION_DIR/$i\" $MDLPTH\n", + " k=k+1\n", + "\n", + " k=0\n", + " print('\u001b[1;33mNo final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\\n\u001b[1;34m')\n", + "\n", + " for i in mdls:\n", + " print(str(k)+'- '+i)\n", + " k=k+1\n", + " n=input()\n", + " while int(n)>k-1:\n", + " n=input()\n", + " if n!=\"000\":\n", + " f(int(n))\n", + " print('\u001b[1;32mUsing the model '+ mdls[int(n)]+\" ...\")\n", + " time.sleep(2)\n", + " else:\n", + " print('\u001b[1;32mSkipping the intermediary checkpoints.')\n", + " del n\n", + "\n", + "\n", + "if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):\n", + " print('\u001b[1;32mLoading session with no previous model, using the original model or the custom downloaded model')\n", + " if MODEL_NAME==\"\":\n", + " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", + " else:\n", + " print('\u001b[1;32mSession Loaded, proceed to uploading instance images')\n", + "\n", + "elif os.path.exists(MDLPTH):\n", + " print('\u001b[1;32mSession found, loading the trained model ...')\n", + " if Model_Version=='1.5':\n", + " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", + " !unzip -o -q refmdlz\n", + " !rm -f refmdlz\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", + " clear_output()\n", + " print('\u001b[1;32mSession found, loading the trained model ...')\n", + " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v1\n", + " !rm -r /content/refmdl\n", + " elif Model_Version=='V2.1-512px':\n", + " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " print('\u001b[1;32mSession found, loading the trained model ...')\n", + " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", + " elif Model_Version=='V2.1-768px':\n", + " !wget -O convertodiff.py 
https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", + " clear_output()\n", + " print('\u001b[1;32mSession found, loading the trained model ...')\n", + " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1\n", + " !rm /content/convertodiff.py\n", + " if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", + " resume=True\n", + " clear_output()\n", + " print('\u001b[1;32mSession loaded.')\n", + " else:\n", + " if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mConversion error, if the error persists, remove the CKPT file from the current session folder')\n", + "\n", + "elif not os.path.exists(str(SESSION_DIR)):\n", + " %mkdir -p \"$INSTANCE_DIR\"\n", + " print('\u001b[1;32mCreating session...')\n", + " if MODEL_NAME==\"\":\n", + " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", + " else:\n", + " print('\u001b[1;32mSession created, proceed to uploading instance images')\n", + "\n", + " #@markdown\n", + "\n", + " #@markdown # The most important step is to rename the instance pictures of each subject to a unique, unknown identifier, for example:\n", + " #@markdown - If you have 30 pictures of yourself, select them all and rename only one to the chosen identifier, for example: phtmejhn; the files will become phtmejhn (1).jpg, phtmejhn (2).png ...etc. Then upload them, and do the same for other people or objects with a different identifier.\n", + " #@markdown - Check out this example: https://i.imgur.com/d2lD3rz.jpeg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "cellView": "form", + "id": "LC4ukG60fgMy", + "outputId": "486c2ba3-9fe7-4a5d-fa13-395b07c63058", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 90 + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "\r |███████████████| 1/1 Uploaded" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "\u001b[1;32mDone, proceed to the next cell\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "\n" + ] + } + ], + "source": [ + "import shutil\n", + "from google.colab import files\n", + "from PIL import Image\n", + "from tqdm import tqdm\n", + "\n", + "#@markdown #Instance Images\n", + "#@markdown ----\n", + "\n", + "#@markdown\n", + "#@markdown - Run the cell to upload the instance pictures.\n", + "\n", + "Remove_existing_instance_images= True #@param{type: 'boolean'}\n", + "#@markdown - Uncheck the box to keep the existing instance images.\n", + "\n", + "\n", + "if Remove_existing_instance_images:\n", + " if os.path.exists(str(INSTANCE_DIR)):\n", + " !rm -r \"$INSTANCE_DIR\"\n", + "\n", + "if not os.path.exists(str(INSTANCE_DIR)):\n", + " %mkdir -p \"$INSTANCE_DIR\"\n", + "\n", + "IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n", + "\n", + "#@markdown - If you prefer to specify the folder of the pictures directly instead of uploading, this will add the pictures to the existing (if any) instance images.
Leave EMPTY to upload.\n", + "\n", + "Crop_images= True #@param{type: 'boolean'}\n", + "Crop_size = \"512\" #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"]\n", + "Crop_size=int(Crop_size)\n", + "\n", + "#@markdown - Unless you want to crop them manually in a precise way, you don't need to crop your instance images externally.\n", + "\n", + "while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n", + " print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n", + " IMAGES_FOLDER_OPTIONAL=input('')\n", + "\n", + "if IMAGES_FOLDER_OPTIONAL!=\"\":\n", + " if Crop_images:\n", + " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " extension = filename.split(\".\")[-1]\n", + " identifier=filename.split(\".\")[0]\n", + " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", + " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", + " width, height = file.size\n", + " if file.size !=(Crop_size, Crop_size):\n", + " side_length = min(width, height)\n", + " left = (width - side_length)/2\n", + " top = (height - side_length)/2\n", + " right = (width + side_length)/2\n", + " bottom = (height + side_length)/2\n", + " image = file.crop((left, top, right, bottom))\n", + " image = image.resize((Crop_size, Crop_size))\n", + " if (extension.upper() == \"JPG\"):\n", + " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", + " else:\n", + " image.save(new_path_with_file, format=extension.upper())\n", + " else:\n", + " !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n", + "\n", + " else:\n", + " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n", + "\n", + " print('\\n\u001b[1;32mDone, proceed to the next cell')\n", + "\n", + "\n", + "elif IMAGES_FOLDER_OPTIONAL ==\"\":\n", + " uploaded = files.upload()\n", + " if Crop_images:\n", + " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " shutil.move(filename, INSTANCE_DIR)\n", + " extension = filename.split(\".\")[-1]\n", + " identifier=filename.split(\".\")[0]\n", + " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", + " file = Image.open(new_path_with_file)\n", + " width, height = file.size\n", + " if file.size !=(Crop_size, Crop_size):\n", + " side_length = min(width, height)\n", + " left = (width - side_length)/2\n", + " top = (height - side_length)/2\n", + " right = (width + side_length)/2\n", + " bottom = (height + side_length)/2\n", + " image = file.crop((left, top, right, bottom))\n", + " image = image.resize((Crop_size, Crop_size))\n", + " if (extension.upper() == \"JPG\"):\n", + " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", + " else:\n", + " image.save(new_path_with_file, format=extension.upper())\n", + " clear_output()\n", + " else:\n", + " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " shutil.move(filename, INSTANCE_DIR)\n", + " clear_output()\n", + "\n", + " print('\\n\u001b[1;32mDone, proceed to the next cell')\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd \"$INSTANCE_DIR\"\n", + " !find . 
-name \"* *\" -type f | rename 's/ /-/g'\n", + "\n", + " %cd $SESSION_DIR\n", + " !rm instance_images.zip\n", + " !zip -r instance_images instance_images\n", + " %cd /content" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "cellView": "form", + "id": "LxEv3u8mQos3", + "outputId": "44dccae5-02b5-4e65-aa4b-53dcac4133c3", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 646 + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "\r |███████████████| 17/17 Uploaded\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "\u001b[1;32mAlmost done...\n", + "\n", + "\u001b[1;32mDone, proceed to the training cell\n" + ] + } + ], + "source": [ + "import shutil\n", + "from google.colab import files\n", + "from PIL import Image\n", + "from tqdm import tqdm\n", + "\n", + "#@markdown #Concept Images\n", + "#@markdown ----\n", + "\n", + "#@markdown\n", + "#@markdown - Run this `optional` cell to upload concept pictures. If you're traning on a specific face, skip this cell.\n", + "#@markdown - Training a model on a restricted number of instance images tends to indoctrinate it and limit its imagination, so concept images help re-opening its \"mind\" to diversity and greatly widen the range of possibilities of the output, concept images should contain anything related to the instance pictures, including objects, ideas, scenes, phenomenons, concepts (obviously), don't be afraid to slightly diverge from the trained style. The resolution of the pictures doesn't matter.\n", + "\n", + "Remove_existing_concept_images= True #@param{type: 'boolean'}\n", + "#@markdown - Uncheck the box to keep the existing concept images.\n", + "\n", + "\n", + "if Remove_existing_concept_images:\n", + " if os.path.exists(str(CONCEPT_DIR)):\n", + " !rm -r \"$CONCEPT_DIR\"\n", + "\n", + "if not os.path.exists(str(CONCEPT_DIR)):\n", + " %mkdir -p \"$CONCEPT_DIR\"\n", + "\n", + "IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n", + "\n", + "#@markdown - If you prefer to specify directly the folder of the pictures instead of uploading, this will add the pictures to the existing (if any) concept images. 
Leave EMPTY to upload.\n", + "\n", + "Crop_images= True\n", + "Crop_size = \"512\"\n", + "Crop_size=int(Crop_size)\n", + "\n", + "while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n", + " print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n", + " IMAGES_FOLDER_OPTIONAL=input('')\n", + "\n", + "if IMAGES_FOLDER_OPTIONAL!=\"\":\n", + " if Crop_images:\n", + " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " extension = filename.split(\".\")[-1]\n", + " identifier=filename.split(\".\")[0]\n", + " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", + " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", + " width, height = file.size\n", + " if file.size !=(Crop_size, Crop_size):\n", + " side_length = min(width, height)\n", + " left = (width - side_length)/2\n", + " top = (height - side_length)/2\n", + " right = (width + side_length)/2\n", + " bottom = (height + side_length)/2\n", + " image = file.crop((left, top, right, bottom))\n", + " image = image.resize((Crop_size, Crop_size))\n", + " if (extension.upper() == \"JPG\"):\n", + " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", + " else:\n", + " image.save(new_path_with_file, format=extension.upper())\n", + " else:\n", + " !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n", + "\n", + " else:\n", + " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n", + "\n", + "elif IMAGES_FOLDER_OPTIONAL ==\"\":\n", + " uploaded = files.upload()\n", + " if Crop_images:\n", + " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " shutil.move(filename, CONCEPT_DIR)\n", + " extension = filename.split(\".\")[-1]\n", + " identifier=filename.split(\".\")[0]\n", + " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", + " file = Image.open(new_path_with_file)\n", + " width, height = file.size\n", + " if file.size !=(Crop_size, Crop_size):\n", + " side_length = min(width, height)\n", + " left = (width - side_length)/2\n", + " top = (height - side_length)/2\n", + " right = (width + side_length)/2\n", + " bottom = (height + side_length)/2\n", + " image = file.crop((left, top, right, bottom))\n", + " image = image.resize((Crop_size, Crop_size))\n", + " if (extension.upper() == \"JPG\"):\n", + " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", + " else:\n", + " image.save(new_path_with_file, format=extension.upper())\n", + " clear_output()\n", + " else:\n", + " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", + " shutil.move(filename, CONCEPT_DIR)\n", + " clear_output()\n", + "\n", + "\n", + "print('\\n\u001b[1;32mAlmost done...')\n", + "with capture.capture_output() as cap:\n", + " i=0\n", + " for filename in os.listdir(CONCEPT_DIR):\n", + " extension = filename.split(\".\")[-1]\n", + " identifier=filename.split(\".\")[0]\n", + " new_path_with_file = os.path.join(CONCEPT_DIR, \"conceptimagedb\"+str(i)+\".\"+extension)\n", + " filepath=os.path.join(CONCEPT_DIR,filename)\n", + " !mv \"$filepath\" $new_path_with_file\n", + " i=i+1\n", + "\n", + " %cd $SESSION_DIR\n", + " !rm concept_images.zip\n", + " !zip -r concept_images concept_images\n", + " %cd /content\n", + "\n", + "print('\\n\u001b[1;32mDone, 
proceed to the training cell')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZnmQYfZilzY6" + }, + "source": [ + "# Training" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "cellView": "form", + "id": "1-9QbkfAVYYU", + "outputId": "90136696-9ae8-44f3-928f-9c6d517c0603", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\n" + ] + } + ], + "source": [ + "#@markdown ---\n", + "#@markdown #Start DreamBooth\n", + "#@markdown ---\n", + "import os\n", + "from subprocess import getoutput\n", + "from IPython.display import clear_output\n", + "from google.colab import runtime\n", + "import time\n", + "import random\n", + "\n", + "if os.path.exists(INSTANCE_DIR+\"/.ipynb_checkpoints\"):\n", + " %rm -r $INSTANCE_DIR\"/.ipynb_checkpoints\"\n", + "\n", + "if os.path.exists(CONCEPT_DIR+\"/.ipynb_checkpoints\"):\n", + " %rm -r $CONCEPT_DIR\"/.ipynb_checkpoints\"\n", + "\n", + "Resume_Training = True #@param {type:\"boolean\"}\n", + "\n", + "try:\n", + " resume\n", + " if resume and not Resume_Training:\n", + " print('\u001b[1;31mOverwrite your previously trained model? Answering \"yes\" will train a new model, answering \"no\" will resume training the previous model. yes or no?\u001b[0m')\n", + " while True:\n", + " ansres=input('')\n", + " if ansres=='no':\n", + " Resume_Training = True\n", + " del ansres\n", + " break\n", + " elif ansres=='yes':\n", + " Resume_Training = False\n", + " resume= False\n", + " break\n", + "except:\n", + " pass\n", + "\n", + "while not Resume_Training and MODEL_NAME==\"\":\n", + " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", + " time.sleep(5)\n", + "\n", + "#@markdown - If you're not satisfied with the result, check this box, run the cell again, and it will continue training the current model.\n", + "\n", + "MODELT_NAME=MODEL_NAME\n", + "\n", + "UNet_Training_Steps=0 #@param{type: 'number'}\n", + "\n", + "#@markdown - Start with 3000 or lower and test the model; if the result is not good enough, resume training for another 1000 steps and keep testing until you get the desired output. `set it to 0 to train only the text_encoder`.\n", + "\n", + "Text_Encoder_Training_Steps=0 #@param{type: 'number'}\n", + "\n", + "#@markdown - 350-600 steps is enough for a small dataset; keep this number small to avoid overfitting. Set to 0 to disable. `set it to 0 before resuming training if it is already trained`.\n", + "\n", + "Text_Encoder_Concept_Training_Steps=1000 #@param{type: 'number'}\n", + "\n", + "#@markdown - Suitable for training a style/concept as it acts as heavy regularization: set it to 1500 steps for 200 concept images (you can go higher). Set to 0 to disable, or set both the settings above to 0 to finetune only the text_encoder on the concept. `set it to 0 before resuming training if it is already trained`.\n", + "\n", + "trnonltxt=\"\"\n", + "if UNet_Training_Steps==0:\n", + " trnonltxt=\"--train_only_text_encoder\"\n", + "\n", + "Seed=''\n", + "\n", + "Style_Training = False #@param {type:\"boolean\"}\n", + "\n", + "#@markdown - Further reduces overfitting; suitable when training a style or a general theme. Keep the steps low.\n", + "\n", + "Style=\"\"\n", + "if Style_Training:\n", + " Style=\"--Style\"\n", + "\n", + "Resolution = \"512\" #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"]\n", +
"Res=int(Resolution)\n", + "\n", + "#@markdown - Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n", + "\n", + "fp16 = True\n", + "\n", + "if Seed =='' or Seed=='0':\n", + " Seed=random.randint(1, 999999)\n", + "else:\n", + " Seed=int(Seed)\n", + "\n", + "if fp16:\n", + " prec=\"fp16\"\n", + "else:\n", + " prec=\"no\"\n", + "\n", + "s = getoutput('nvidia-smi')\n", + "if 'A100' in s:\n", + " precision=\"no\"\n", + "else:\n", + " precision=prec\n", + "\n", + "resuming=\"\"\n", + "if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", + " MODELT_NAME=OUTPUT_DIR\n", + " print('\u001b[1;32mResuming Training...\u001b[0m')\n", + " resuming=\"Yes\"\n", + "elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", + " print('\u001b[1;31mPrevious model not found, training a new model...\u001b[0m')\n", + " MODELT_NAME=MODEL_NAME\n", + " while MODEL_NAME==\"\":\n", + " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", + " time.sleep(5)\n", + "\n", + "V2=False\n", + "if os.path.getsize(MODELT_NAME+\"/text_encoder/pytorch_model.bin\") > 670901463:\n", + " V2=True\n", + "\n", + "Enable_text_encoder_training= True\n", + "Enable_Text_Encoder_Concept_Training= True\n", + "\n", + "if Text_Encoder_Training_Steps==0:\n", + " Enable_text_encoder_training= False\n", + "else:\n", + " stptxt=Text_Encoder_Training_Steps\n", + "\n", + "if Text_Encoder_Concept_Training_Steps==0:\n", + " Enable_Text_Encoder_Concept_Training= False\n", + "else:\n", + " stptxtc=Text_Encoder_Concept_Training_Steps\n", + "\n", + "\n", + "if Enable_text_encoder_training:\n", + " Textenc=\"--train_text_encoder\"\n", + "else:\n", + " Textenc=\"\"\n", + "\n", + "#@markdown ---------------------------\n", + "Save_Checkpoint_Every_n_Steps = True #@param {type:\"boolean\"}\n", + "Save_Checkpoint_Every=250 #@param{type: 'number'}\n", + "if Save_Checkpoint_Every==None:\n", + " Save_Checkpoint_Every=1\n", + "#@markdown - Minimum 200 steps between each save.\n", + "stp=0\n", + "Start_saving_from_the_step=250 #@param{type: 'number'}\n", + "if Start_saving_from_the_step==None:\n", + " Start_saving_from_the_step=0\n", + "if (Start_saving_from_the_step < 200):\n", + " Start_saving_from_the_step=Save_Checkpoint_Every\n", + "stpsv=Start_saving_from_the_step\n", + "if Save_Checkpoint_Every_n_Steps:\n", + " stp=Save_Checkpoint_Every\n", + "#@markdown - Start saving intermediary checkpoints from this step.\n", + "\n", + "Disconnect_after_training=False #@param {type:\"boolean\"}\n", + "\n", + "#@markdown - Auto-disconnect from google colab after the training to avoid wasting compute units.\n", + "\n", + "def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):\n", + "\n", + " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", + " $trnonltxt \\\n", + " --image_captions_filename \\\n", + " --train_text_encoder \\\n", + " --dump_only_text_encoder \\\n", + " --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n", + " --instance_data_dir=\"$INSTANCE_DIR\" \\\n", + " --output_dir=\"$OUTPUT_DIR\" \\\n", + " --instance_prompt=\"$PT\" \\\n", + " --seed=$Seed \\\n", + " --resolution=512 \\\n", + " --mixed_precision=$precision \\\n", + " --train_batch_size=1 \\\n", + " --gradient_accumulation_steps=1 --gradient_checkpointing \\\n", + " --use_8bit_adam \\\n", + " --learning_rate=2e-6 \\\n", + " 
--lr_scheduler=\"polynomial\" \\\n", + " --lr_warmup_steps=0 \\\n", + " --max_train_steps=$Training_Steps\n", + "\n", + "def train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps):\n", + " clear_output()\n", + " if resuming==\"Yes\":\n", + " print('\u001b[1;32mResuming Training...\u001b[0m')\n", + " print('\u001b[1;33mTraining the UNet...\u001b[0m')\n", + " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", + " $Style \\\n", + " --image_captions_filename \\\n", + " --train_only_unet \\\n", + " --save_starting_step=$stpsv \\\n", + " --save_n_steps=$stp \\\n", + " --Session_dir=$SESSION_DIR \\\n", + " --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n", + " --instance_data_dir=\"$INSTANCE_DIR\" \\\n", + " --output_dir=\"$OUTPUT_DIR\" \\\n", + " --instance_prompt=\"$PT\" \\\n", + " --seed=$Seed \\\n", + " --resolution=$Res \\\n", + " --mixed_precision=$precision \\\n", + " --train_batch_size=1 \\\n", + " --gradient_accumulation_steps=1 --gradient_checkpointing \\\n", + " --use_8bit_adam \\\n", + " --learning_rate=2e-6 \\\n", + " --lr_scheduler=\"polynomial\" \\\n", + " --lr_warmup_steps=0 \\\n", + " --max_train_steps=$Training_Steps\n", + "\n", + "\n", + "if Enable_text_encoder_training :\n", + " print('\u001b[1;33mTraining the text encoder...\u001b[0m')\n", + " if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):\n", + " %rm -r $OUTPUT_DIR\"/text_encoder_trained\"\n", + " dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)\n", + "if Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)!=[]:\n", + " clear_output()\n", + " if resuming==\"Yes\":\n", + " print('\u001b[1;32mResuming Training...\u001b[0m')\n", + " print('\u001b[1;33mTraining the text encoder on the concept...\u001b[0m')\n", + " dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)\n", + "elif Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)==[]:\n", + " print('\u001b[1;31mNo concept images found, skipping concept training...')\n", + " time.sleep(8)\n", + "if UNet_Training_Steps!=0:\n", + " train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps=UNet_Training_Steps)\n", + "\n", + "\n", + "if os.path.exists('/content/models/'+INSTANCE_NAME+'/unet/diffusion_pytorch_model.bin'):\n", + " prc=\"--fp16\" if precision==\"fp16\" else \"\"\n", + " if V2:\n", + " !python /content/diffusers/scripts/convertosdv2.py $prc $OUTPUT_DIR $SESSION_DIR/$Session_Name\".ckpt\"\n", + " clear_output()\n", + " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n", + " clear_output()\n", + " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", + " if Disconnect_after_training :\n", + " time.sleep(20)\n", + " runtime.unassign()\n", + " else:\n", + " print(\"\u001b[1;31mSomething went wrong\")\n", + " else:\n", + " !wget -O /content/convertosd.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertosd.py\n", + " clear_output()\n", + " if precision==\"no\":\n", + " !sed -i '226s@.*@@' /content/convertosd.py\n", + " !sed -i '201s@.*@ model_path = \"{OUTPUT_DIR}\"@' /content/convertosd.py\n", + " !sed -i '202s@.*@ checkpoint_path= \"{SESSION_DIR}/{Session_Name}.ckpt\"@' /content/convertosd.py\n", + " !python /content/convertosd.py\n", + " !rm /content/convertosd.py\n", + " 
clear_output()\n", + " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n", + " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", + " if Disconnect_after_training :\n", + " time.sleep(20)\n", + " runtime.unassign()\n", + " else:\n", + " print(\"\u001b[1;31mSomething went wrong\")\n", + "\n", + "else:\n", + " print(\"\u001b[1;31mSomething went wrong\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ehi1KKs-l-ZS" + }, + "source": [ + "# Test The Trained Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iAZGngFcI8hq", + "outputId": "f9012994-7d17-481d-fdc0-7362318eefaf", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "LatentDiffusion: Running in eps-prediction mode\n", + "DiffusionWrapper has 865.91 M params.\n" + ] + } + ], + "source": [ + "import os\n", + "import time\n", + "import sys\n", + "import fileinput\n", + "from IPython.display import clear_output\n", + "from subprocess import getoutput\n", + "from IPython.utils import capture\n", + "\n", + "\n", + "Model_Version = \"V2.1-512\" #@param [\"1.5\", \"V2.1-512\", \"V2.1-768\"]\n", + "#@markdown - Important! Choose the correct version and resolution of the model\n", + "\n", + "Update_repo = True\n", + "\n", + "Session__Name=\"\" #@param{type: 'string'}\n", + "\n", + "#@markdown - Leave empty if you want to use the current trained model.\n", + "\n", + "Use_Custom_Path = False #@param {type:\"boolean\"}\n", + "\n", + "try:\n", + " INSTANCE_NAME\n", + " INSTANCET=INSTANCE_NAME\n", + "except:\n", + " pass\n", + "#@markdown - if checked, an input box will ask the full path to a desired model.\n", + "\n", + "if Session__Name!=\"\":\n", + " INSTANCET=Session__Name\n", + " INSTANCET=INSTANCET.replace(\" \",\"_\")\n", + "\n", + "if Use_Custom_Path:\n", + " try:\n", + " INSTANCET\n", + " del INSTANCET\n", + " except:\n", + " pass\n", + "\n", + "try:\n", + " INSTANCET\n", + " if Session__Name!=\"\":\n", + " path_to_trained_model='/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/'+Session__Name+\"/\"+Session__Name+'.ckpt'\n", + " else:\n", + " path_to_trained_model=SESSION_DIR+\"/\"+INSTANCET+'.ckpt'\n", + "except:\n", + " print('\u001b[1;31mIt seems that you did not perform training during this session \u001b[1;32mor you chose to use a custom path,\\nprovide the full path to the model (including the name of the model):\\n')\n", + " path_to_trained_model=input()\n", + "\n", + "while not os.path.exists(path_to_trained_model):\n", + " print(\"\u001b[1;31mThe model doesn't exist on you Gdrive, use the file explorer to get the path : \")\n", + " path_to_trained_model=input()\n", + "\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd /content/gdrive/MyDrive/\n", + " %mkdir sd\n", + " %cd sd\n", + " !git clone https://github.com/Stability-AI/stablediffusion\n", + " !git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", + " !mkdir -p cache/{huggingface,torch}\n", + " %cd /content/\n", + " !ln -s /content/gdrive/MyDrive/sd/stable-diffusion-webui/cache/huggingface ../root/.cache/\n", + " !ln -s /content/gdrive/MyDrive/sd/stable-diffusion-webui/cache/torch ../root/.cache/\n", + " !wget -O /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/shared.py\n", + "\n", 
+ "if Update_repo:\n", + " with capture.capture_output() as cap:\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.sh\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/paths.py\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", + " print('\u001b[1;32m')\n", + " !git pull\n", + "\n", + "\n", + "with capture.capture_output() as cap:\n", + "\n", + " if not os.path.exists('/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion'):\n", + " !mkdir /content/gdrive/MyDrive/sd/stablediffusion/src\n", + " %cd /content/gdrive/MyDrive/sd/stablediffusion/src\n", + " !git clone https://github.com/CompVis/taming-transformers\n", + " !git clone https://github.com/openai/CLIP\n", + " !git clone https://github.com/salesforce/BLIP\n", + " !git clone https://github.com/sczhou/CodeFormer\n", + " !git clone https://github.com/crowsonkb/k-diffusion\n", + " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CLIP /content/gdrive/MyDrive/sd/stablediffusion/src/clip\n", + " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/BLIP /content/gdrive/MyDrive/sd/stablediffusion/src/blip\n", + " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CodeFormer /content/gdrive/MyDrive/sd/stablediffusion/src/codeformer\n", + " !cp -r /content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", + "\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules\n", + " !wget -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py\n", + "\n", + "with capture.capture_output() as cap:\n", + " if not os.path.exists('/tools/node/bin/lt'):\n", + " !npm install -g localtunnel\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", + " time.sleep(1)\n", + " !wget -O webui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.py\n", + " !sed -i 's@ui.create_ui().*@ui.create_ui();shared.demo.queue(concurrency_count=999999,status_update_rate=0.1)@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/\n", + " !wget -O ui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/ui.py\n", + " !sed -i 's@css = \"\".*@with open(os.path.join(script_path, \"style.css\"), \"r\", encoding=\"utf8\") as file:\\n css = file.read()@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", + " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui\n", + " !wget -O style.css https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/style.css\n", + " !sed -i 's@min-height: 4.*@min-height: 5.5em;@g' /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", + " !sed -i 's@\"multiple_tqdm\": true,@\\\"multiple_tqdm\": false,@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/config.json\n", + " !sed -i '902s@.*@ self.logvar = self.logvar.to(self.device)@' /content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py\n", + " %cd /content\n", + "\n", + "\n", + "Use_Gradio_Server = True #@param {type:\"boolean\"}\n", + "#@markdown - Only if you have 
trouble connecting to the local server.\n", + "\n", + "Large_Model= False #@param {type:\"boolean\"}\n", + "#@markdown - Check if you have trouble loading a model 7GB+\n", + "\n", + "if Large_Model:\n", + " !sed -i 's@cmd_opts.lowram else \\\"cpu\\\"@cmd_opts.lowram else \\\"cuda\\\"@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py\n", + "else:\n", + " !sed -i 's@cmd_opts.lowram else \\\"cuda\\\"@cmd_opts.lowram else \\\"cpu\\\"@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py\n", + "\n", + "\n", + "share=''\n", + "if Use_Gradio_Server:\n", + " share='--share'\n", + " for line in fileinput.input('/usr/local/lib/python3.8/dist-packages/gradio/blocks.py', inplace=True):\n", + " if line.strip().startswith('self.server_name ='):\n", + " line = ' self.server_name = server_name\\n'\n", + " if line.strip().startswith('self.server_port ='):\n", + " line = ' self.server_port = server_port\\n'\n", + " sys.stdout.write(line)\n", + " clear_output()\n", + "\n", + "else:\n", + " share=''\n", + " !nohup lt --port 7860 > srv.txt 2>&1 &\n", + " time.sleep(2)\n", + " !grep -o 'https[^ ]*' /content/srv.txt >srvr.txt\n", + " time.sleep(2)\n", + " srv= getoutput('cat /content/srvr.txt')\n", + "\n", + " for line in fileinput.input('/usr/local/lib/python3.8/dist-packages/gradio/blocks.py', inplace=True):\n", + " if line.strip().startswith('self.server_name ='):\n", + " line = f' self.server_name = \"{srv[8:]}\"\\n'\n", + " if line.strip().startswith('self.server_port ='):\n", + " line = ' self.server_port = 443\\n'\n", + " if line.strip().startswith('self.protocol = \"https\"'):\n", + " line = ' self.protocol = \"https\"\\n'\n", + " if line.strip().startswith('if self.local_url.startswith(\"https\") or self.is_colab'):\n", + " line = ''\n", + " if line.strip().startswith('else \"http\"'):\n", + " line = ''\n", + " sys.stdout.write(line)\n", + "\n", + "\n", + " !sed -i '13s@.*@ \"PUBLIC_SHARE_TRUE\": \"\u001b[32mConnected\",@' /usr/local/lib/python3.8/dist-packages/gradio/strings.py\n", + "\n", + " !rm /content/srv.txt\n", + " !rm /content/srvr.txt\n", + " clear_output()\n", + "\n", + "with capture.capture_output() as cap:\n", + " %cd /content/gdrive/MyDrive/sd/stablediffusion/\n", + "\n", + "if Model_Version == \"V2.1-768\":\n", + " configf=\"--config /content/gdrive/MyDrive/sd/stablediffusion/configs/stable-diffusion/v2-inference-v.yaml\"\n", + " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cuda\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", + " NM=\"True\"\n", + "elif Model_Version == \"V2.1-512\":\n", + " configf=\"--config /content/gdrive/MyDrive/sd/stablediffusion/configs/stable-diffusion/v2-inference.yaml\"\n", + " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cuda\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", + " NM=\"True\"\n", + "else:\n", + " configf=\"\"\n", + " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cpu\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", + " NM=\"False\"\n", + "\n", + "if os.path.exists('/usr/local/lib/python3.8/dist-packages/xformers'):\n", + " xformers=\"--xformers\"\n", + "else:\n", + " xformers=\"\"\n", + "\n", + "if os.path.isfile(path_to_trained_model):\n", + " ckpt_dir=os.path.dirname(path_to_trained_model)\n", + " ckpt_cfg='--ckpt 
%s --ckpt-dir %s' % (path_to_trained_model, ckpt_dir)\n", + "else:\n", + " ckpt_cfg='--ckpt-dir %s' % path_to_trained_model\n", + "\n", + "!python /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py $share --disable-safe-unpickle --no-half-vae --enable-insecure-extension-access $ckpt_cfg $configf $xformers" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d_mQ23XsOc5R" + }, + "source": [ + "# Upload The Trained Model to Hugging Face" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "NTqUIuhROdH4" + }, + "outputs": [], + "source": [ + "from slugify import slugify\n", + "from huggingface_hub import HfApi, HfFolder, CommitOperationAdd\n", + "from huggingface_hub import create_repo\n", + "from IPython.display import display_markdown\n", + "from IPython.display import clear_output\n", + "from IPython.utils import capture\n", + "from google.colab import files\n", + "import shutil\n", + "import time\n", + "import os\n", + "\n", + "Upload_sample_images = False #@param {type:\"boolean\"}\n", + "#@markdown - Upload showcase images of your trained model\n", + "\n", + "Name_of_your_concept = \"\" #@param {type:\"string\"}\n", + "if(Name_of_your_concept == \"\"):\n", + " Name_of_your_concept = Session_Name\n", + "Name_of_your_concept=Name_of_your_concept.replace(\" \",\"-\")\n", + "\n", + "Save_concept_to = \"My_Profile\" #@param [\"Public_Library\", \"My_Profile\"]\n", + "\n", + "#@markdown - [Create a write access token](https://huggingface.co/settings/tokens) , go to \"New token\" -> Role : Write. A regular read token won't work here.\n", + "hf_token_write = \"\" #@param {type:\"string\"}\n", + "if hf_token_write ==\"\":\n", + " print('\u001b[1;32mYour Hugging Face write access token : ')\n", + " hf_token_write=input()\n", + "\n", + "hf_token = hf_token_write\n", + "\n", + "api = HfApi()\n", + "your_username = api.whoami(token=hf_token)[\"name\"]\n", + "\n", + "if(Save_concept_to == \"Public_Library\"):\n", + " repo_id = f\"sd-dreambooth-library/{slugify(Name_of_your_concept)}\"\n", + " #Join the Concepts Library organization if you aren't part of it already\n", + " !curl -X POST -H 'Authorization: Bearer '$hf_token -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX\n", + "else:\n", + " repo_id = f\"{your_username}/{slugify(Name_of_your_concept)}\"\n", + "output_dir = f'/content/models/'+INSTANCE_NAME\n", + "\n", + "def bar(prg):\n", + " br=\"\u001b[1;33mUploading to HuggingFace : \" '\u001b[0m|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ \"%\"\n", + " return br\n", + "\n", + "print(\"\u001b[1;32mLoading...\")\n", + "\n", + "NM=\"False\"\n", + "if os.path.getsize(OUTPUT_DIR+\"/text_encoder/pytorch_model.bin\") > 670901463:\n", + " NM=\"True\"\n", + "\n", + "\n", + "if NM==\"False\":\n", + " with capture.capture_output() as cap:\n", + " %cd $OUTPUT_DIR\n", + " !rm -r safety_checker feature_extractor .git\n", + " !rm model_index.json\n", + " !git init\n", + " !git lfs install --system --skip-repo\n", + " !git remote add -f origin \"https://USER:{hf_token}@huggingface.co/runwayml/stable-diffusion-v1-5\"\n", + " !git config core.sparsecheckout true\n", + " !echo -e \"feature_extractor\\nsafety_checker\\nmodel_index.json\" > .git/info/sparse-checkout\n", + " !git pull origin main\n", + " !rm -r .git\n", + " %cd /content\n", + "\n", + "image_string = \"\"\n", + "\n", + "if os.path.exists('/content/sample_images'):\n", + " !rm -r 
/content/sample_images\n", + "Samples=\"/content/sample_images\"\n", + "!mkdir $Samples\n", + "clear_output()\n", + "\n", + "if Upload_sample_images:\n", + "\n", + " print(\"\u001b[1;32mUpload Sample images of the model\")\n", + " uploaded = files.upload()\n", + " for filename in uploaded.keys():\n", + " shutil.move(filename, Samples)\n", + " %cd $Samples\n", + " !find . -name \"* *\" -type f | rename 's/ /_/g'\n", + " %cd /content\n", + " clear_output()\n", + "\n", + " print(bar(1))\n", + "\n", + " images_upload = os.listdir(Samples)\n", + " instance_prompt_list = []\n", + " for i, image in enumerate(images_upload):\n", + " image_string = f'''\n", + " {image_string}![{i}](https://huggingface.co/{repo_id}/resolve/main/sample_images/{image})\n", + " '''\n", + "\n", + "readme_text = f'''---\n", + "license: creativeml-openrail-m\n", + "tags:\n", + "- text-to-image\n", + "- stable-diffusion\n", + "---\n", + "### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)[\"name\"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook\n", + "\n", + "\n", + "Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)\n", + "Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)\n", + "\n", + "Sample pictures of this concept:\n", + "{image_string}\n", + "'''\n", + "#Save the readme to a file\n", + "readme_file = open(\"README.md\", \"w\")\n", + "readme_file.write(readme_text)\n", + "readme_file.close()\n", + "\n", + "operations = [\n", + " CommitOperationAdd(path_in_repo=\"README.md\", path_or_fileobj=\"README.md\"),\n", + " CommitOperationAdd(path_in_repo=f\"{Session_Name}.ckpt\",path_or_fileobj=MDLPTH)\n", + "\n", + "]\n", + "create_repo(repo_id,private=True, token=hf_token)\n", + "\n", + "api.create_commit(\n", + " repo_id=repo_id,\n", + " operations=operations,\n", + " commit_message=f\"Upload the concept {Name_of_your_concept} embeds and token\",\n", + " token=hf_token\n", + ")\n", + "\n", + "if NM==\"False\":\n", + " api.upload_folder(\n", + " folder_path=OUTPUT_DIR+\"/feature_extractor\",\n", + " path_in_repo=\"feature_extractor\",\n", + " repo_id=repo_id,\n", + " token=hf_token\n", + " )\n", + "\n", + "clear_output()\n", + "print(bar(4))\n", + "\n", + "if NM==\"False\":\n", + " api.upload_folder(\n", + " folder_path=OUTPUT_DIR+\"/safety_checker\",\n", + " path_in_repo=\"safety_checker\",\n", + " repo_id=repo_id,\n", + " token=hf_token\n", + " )\n", + "\n", + "clear_output()\n", + "print(bar(8))\n", + "\n", + "\n", + "api.upload_folder(\n", + " folder_path=OUTPUT_DIR+\"/scheduler\",\n", + " path_in_repo=\"scheduler\",\n", + " repo_id=repo_id,\n", + " token=hf_token\n", + ")\n", + "\n", + "clear_output()\n", + "print(bar(9))\n", + "\n", + "api.upload_folder(\n", + " folder_path=OUTPUT_DIR+\"/text_encoder\",\n", + " path_in_repo=\"text_encoder\",\n", + " repo_id=repo_id,\n", + " token=hf_token\n", + ")\n", + "\n", + "clear_output()\n", + "print(bar(12))\n", + "\n", + "api.upload_folder(\n", + " folder_path=OUTPUT_DIR+\"/tokenizer\",\n", + " path_in_repo=\"tokenizer\",\n", + " repo_id=repo_id,\n", + " token=hf_token\n", + ")\n", + "\n", + "clear_output()\n", + "print(bar(13))\n", + "\n", + "api.upload_folder(\n", + " 
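The upload cell chains one `api.upload_folder(...)` call per diffusers component, each followed by a hand-advanced progress bar. The unconditional calls can be driven from a list; a sketch assuming the same `huggingface_hub` client the cell already imports, with `OUTPUT_DIR`, `repo_id` and `hf_token` as defined there (feature_extractor and safety_checker stay conditional on the `NM` check and are not included here):

```python
from huggingface_hub import HfApi

api = HfApi()

# Diffusers components the cell always uploads, in the same order.
COMPONENTS = ["scheduler", "text_encoder", "tokenizer", "unet", "vae"]

def upload_components(output_dir, repo_id, token):
    for name in COMPONENTS:
        api.upload_folder(
            folder_path=output_dir + "/" + name,
            path_in_repo=name,
            repo_id=repo_id,
            token=token,
        )

# upload_components(OUTPUT_DIR, repo_id, hf_token)  # uses the cell's variables
```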
folder_path=OUTPUT_DIR+\"/unet\",\n",
+        "  path_in_repo=\"unet\",\n",
+        "  repo_id=repo_id,\n",
+        "  token=hf_token\n",
+        ")\n",
+        "\n",
+        "clear_output()\n",
+        "print(bar(21))\n",
+        "\n",
+        "api.upload_folder(\n",
+        "  folder_path=OUTPUT_DIR+\"/vae\",\n",
+        "  path_in_repo=\"vae\",\n",
+        "  repo_id=repo_id,\n",
+        "  token=hf_token\n",
+        ")\n",
+        "\n",
+        "clear_output()\n",
+        "print(bar(23))\n",
+        "\n",
+        "api.upload_file(\n",
+        "  path_or_fileobj=OUTPUT_DIR+\"/model_index.json\",\n",
+        "  path_in_repo=\"model_index.json\",\n",
+        "  repo_id=repo_id,\n",
+        "  token=hf_token\n",
+        ")\n",
+        "\n",
+        "clear_output()\n",
+        "print(bar(24))\n",
+        "\n",
+        "api.upload_folder(\n",
+        "  folder_path=Samples,\n",
+        "  path_in_repo=\"sample_images\",\n",
+        "  repo_id=repo_id,\n",
+        "  token=hf_token\n",
+        ")\n",
+        "\n",
+        "clear_output()\n",
+        "print(bar(25))\n",
+        "\n",
+        "display_markdown(f'''## Your concept was saved successfully. [Click here to access it](https://huggingface.co/{repo_id})\n",
+        "''', raw=True)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "iVqNi8IDzA1Z"
+      },
+      "outputs": [],
+      "source": [
+        "#@markdown #Free Gdrive Space\n",
+        "\n",
+        "#@markdown Display the list of sessions from your gdrive and choose which ones to remove.\n",
+        "\n",
+        "import ipywidgets as widgets\n",
+        "\n",
+        "Sessions=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n",
+        "\n",
+        "s = widgets.Select(\n",
+        "  options=Sessions,\n",
+        "  rows=5,\n",
+        "  description='',\n",
+        "  disabled=False\n",
+        ")\n",
+        "\n",
+        "out=widgets.Output()\n",
+        "\n",
+        "d = widgets.Button(\n",
+        "  description='Remove',\n",
+        "  disabled=False,\n",
+        "  button_style='warning',\n",
+        "  tooltip='Remove the selected session',\n",
+        "  icon='warning'\n",
+        ")\n",
+        "\n",
+        "def rem(d):\n",
+        "  with out:\n",
+        "    if s.value is not None:\n",
+        "      clear_output()\n",
+        "      print(\"\u001b[1;33mTHE SESSION \u001b[1;31m\"+s.value+\" \u001b[1;33mHAS BEEN REMOVED FROM YOUR GDRIVE\")\n",
+        "      !rm -r '/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/{s.value}'\n",
+        "      s.options=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n",
+        "    else:\n",
+        "      d.close()\n",
+        "      s.close()\n",
+        "      clear_output()\n",
+        "      print(\"\u001b[1;32mNOTHING TO REMOVE\")\n",
+        "\n",
+        "d.on_click(rem)\n",
+        "if s.value is not None:\n",
+        "  display(s,d,out)\n",
+        "else:\n",
+        "  print(\"\u001b[1;32mNOTHING TO REMOVE\")"
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "collapsed_sections": [
+        "bbKbx185zqlz",
+        "AaLtXBbPleBr"
+      ],
+      "provenance": []
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file

From 9e3167550a9f2ae6e614d772e601e31b538b57ad Mon Sep 17 00:00:00 2001
From: zuencap <37028435+zuencap@users.noreply.github.com>
Date: Tue, 20 Dec 2022 15:02:09 +0100
Subject: [PATCH 2/4] Enable switch between checkpoint copies in webui

---
 fast-DreamBooth.ipynb | 184 +++++++++++++++++++++---------------------
 1 file changed, 93 insertions(+), 91 deletions(-)

diff --git a/fast-DreamBooth.ipynb b/fast-DreamBooth.ipynb
index f06f2719..0d9031a1 100644
--- a/fast-DreamBooth.ipynb
+++ b/fast-DreamBooth.ipynb
@@ -6,7 +6,7 @@
          "id": "qEsNHTtVlbkV"
        },
        "source": [
-        "# **fast-DreamBooth colab From https://github.com/TheLastBen/fast-stable-diffusion, if you face any issues, feel free to discuss them.** \n",
+        "# **fast-DreamBooth colab From 
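The session-removal widget above shells out with `!rm -r '...{s.value}'`, which misbehaves if a session name ever contains a single quote. A sketch of the same wiring using `shutil.rmtree`, which needs no shell quoting (same Drive paths as the cell):

```python
import os
import shutil
import ipywidgets as widgets
from IPython.display import display, clear_output

SESSIONS = "/content/gdrive/MyDrive/Fast-Dreambooth/Sessions"

s = widgets.Select(options=os.listdir(SESSIONS), rows=5)
out = widgets.Output()
d = widgets.Button(description="Remove", button_style="warning",
                   tooltip="Remove the selected session", icon="warning")

def rem(_):
    with out:
        if s.value is not None:
            clear_output()
            shutil.rmtree(os.path.join(SESSIONS, s.value))  # no shell quoting pitfalls
            print("Removed", s.value)
            s.options = os.listdir(SESSIONS)

d.on_click(rem)
display(s, d, out)
```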
https://github.com/TheLastBen/fast-stable-diffusion, if you face any issues, feel free to discuss them.**\n", "Keep your notebook updated for best experience. [Support](https://ko-fi.com/thelastben)\n" ] }, @@ -51,7 +51,7 @@ " !pip uninstall -y diffusers\n", " !git clone --branch updt https://github.com/TheLastBen/diffusers\n", " !pip install -q /content/diffusers\n", - "print('\u001b[1;32mDONE !') " + "print('\u001b[1;32mDONE !')" ] }, { @@ -87,7 +87,7 @@ "\n", "#@markdown ---\n", "\n", - "with capture.capture_output() as cap: \n", + "with capture.capture_output() as cap:\n", " %cd /content/\n", "\n", "Huggingface_Token = \"\" #@param {type:\"string\"}\n", @@ -140,12 +140,12 @@ " !rm -r /content/stable-diffusion-v1-5/.git\n", " %cd /content/stable-diffusion-v1-5\n", " !rm model_index.json\n", - " time.sleep(1) \n", + " time.sleep(1)\n", " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", " !sed -i 's@\"clip_sample\": false@@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", - " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json \n", - " %cd /content/ \n", + " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json\n", + " %cd /content/\n", " clear_output()\n", " print('\u001b[1;32mDONE !')\n", " else:\n", @@ -183,7 +183,7 @@ " !git pull origin main\n", " clear_output()\n", " print('\u001b[1;32mDONE !')\n", - " \n", + "\n", "\n", "if Path_to_HuggingFace != \"\":\n", " if Custom_Model_Version=='V2.1-512px' or Custom_Model_Version=='V2.1-768px':\n", @@ -202,8 +202,8 @@ " !git pull origin main\n", " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", " !rm -r /content/stable-diffusion-custom/.git\n", - " %cd /content/ \n", - " MODEL_NAME=\"/content/stable-diffusion-custom\" \n", + " %cd /content/\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", " clear_output()\n", " print('\u001b[1;32mDONE !')\n", " else:\n", @@ -234,24 +234,24 @@ " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", " !sed -i 's@\"clip_sample\": false,@@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", - " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-custom/vae/config.json \n", - " %cd /content/ \n", - " MODEL_NAME=\"/content/stable-diffusion-custom\" \n", + " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-custom/vae/config.json\n", + " %cd /content/\n", + " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", " clear_output()\n", " print('\u001b[1;32mDONE !')\n", " else:\n", " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", " print('\u001b[1;31mCheck the link you provided')\n", - " time.sleep(5) \n", + " time.sleep(5)\n", "\n", "elif CKPT_Path !=\"\":\n", " %cd /content\n", - " clear_output() \n", + " clear_output()\n", " if os.path.exists(str(CKPT_Path)):\n", " if Custom_Model_Version=='1.5':\n", " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", " !unzip -o 
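The `downloadmodel()` helpers touched by these hunks fetch only the needed subfolders of a model repo through a git sparse checkout. The same sequence as plain Python, with the git commands copied from the cell (the URL and `USER:TOKEN` placeholder are illustrative, not real credentials):

```python
import subprocess
from pathlib import Path

def sparse_pull(repo_url, dest, entries):
    """Partial git download, mirroring the cell's downloadmodel() sequence."""
    d = Path(dest)
    d.mkdir(parents=True, exist_ok=True)
    def run(*cmd):
        subprocess.run(cmd, cwd=str(d), check=True)
    run("git", "init")
    run("git", "lfs", "install", "--system", "--skip-repo")
    run("git", "remote", "add", "-f", "origin", repo_url)
    run("git", "config", "core.sparsecheckout", "true")
    # Only the listed paths are materialized by the pull below.
    (d / ".git" / "info" / "sparse-checkout").write_text("\n".join(entries) + "\n")
    run("git", "pull", "origin", "main")

# sparse_pull("https://USER:TOKEN@huggingface.co/runwayml/stable-diffusion-v1-5",
#             "/content/stable-diffusion-v1-5",
#             ["scheduler", "text_encoder", "tokenizer", "unet", "model_index.json"])
```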
-q refmdlz\n", - " !rm -f refmdlz \n", + " !rm -f refmdlz\n", " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", " clear_output()\n", " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v1\n", @@ -277,19 +277,19 @@ " else:\n", " while not os.path.exists(str(CKPT_Path)):\n", " print('\u001b[1;31mWrong path, use the colab file explorer to copy the path')\n", - " time.sleep(5) \n", + " time.sleep(5)\n", "\n", - "elif CKPT_Link !=\"\": \n", + "elif CKPT_Link !=\"\":\n", " %cd /content\n", - " clear_output() \n", + " clear_output()\n", " !gdown --fuzzy -O model.ckpt $CKPT_Link\n", - " clear_output() \n", + " clear_output()\n", " if os.path.exists('/content/model.ckpt'):\n", " if os.path.getsize(\"/content/model.ckpt\") > 1810671599:\n", " if Custom_Model_Version=='1.5':\n", " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", " !unzip -o -q refmdlz\n", - " !rm -f refmdlz \n", + " !rm -f refmdlz\n", " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", " clear_output()\n", " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v1\n", @@ -317,7 +317,7 @@ " while os.path.getsize('/content/model.ckpt') < 1810671599:\n", " print('\u001b[1;31mWrong link, check that the link is valid')\n", " time.sleep(5)\n", - " \n", + "\n", "else:\n", " if Model_Version==\"1.5\":\n", " if not os.path.exists('/content/stable-diffusion-v1-5'):\n", @@ -332,14 +332,14 @@ " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", " else:\n", " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", - " print(\"\u001b[1;32mThe v2-512px model already exists, using this model.\") \n", + " print(\"\u001b[1;32mThe v2-512px model already exists, using this model.\")\n", " elif Model_Version==\"V2.1-768px\":\n", - " if not os.path.exists('/content/stable-diffusion-v2-768'): \n", + " if not os.path.exists('/content/stable-diffusion-v2-768'):\n", " newdownloadmodel()\n", " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", " else:\n", " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", - " print(\"\u001b[1;32mThe v2-768px model already exists, using this model.\") " + " print(\"\u001b[1;32mThe v2-768px model already exists, using this model.\")" ] }, { @@ -375,12 +375,12 @@ " pass\n", "except:\n", " MODEL_NAME=\"\"\n", - " \n", + "\n", "PT=\"\"\n", "\n", "Session_Name = \"\" #@param{type: 'string'}\n", "while Session_Name==\"\":\n", - " print('\u001b[1;31mInput the Session Name:') \n", + " print('\u001b[1;31mInput the Session Name:')\n", " Session_Name=input('')\n", "Session_Name=Session_Name.replace(\" \",\"_\")\n", "\n", @@ -406,7 +406,7 @@ " !rm -r instance_images\n", " !unzip instance_images.zip\n", " !rm -r concept_images\n", - " !unzip concept_images.zip \n", + " !unzip concept_images.zip\n", " %cd /content\n", "\n", "\n", @@ -423,24 +423,24 @@ "\n", "if os.path.exists(str(SESSION_DIR)):\n", " mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(\".\")[-1]==\"ckpt\"]\n", - " if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls): \n", - " \n", - " def f(n): \n", + " if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):\n", + "\n", + " def f(n):\n", " k=0\n", - " for i in mdls: \n", - " if k==n: \n", + " for i in mdls:\n", + " if k==n:\n", " !mv \"$SESSION_DIR/$i\" $MDLPTH\n", " k=k+1\n", "\n", " k=0\n", " print('\u001b[1;33mNo final checkpoint model 
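The CKPT branches treat `1810671599` bytes as the lower bound for a complete Stable Diffusion checkpoint and poll in a print-and-sleep loop when the file looks truncated. The check itself as a small function (threshold taken from the cell; a sketch, not the notebook's code):

```python
import os

MIN_CKPT_BYTES = 1810671599  # the cell's lower bound for a complete SD checkpoint

def looks_like_full_checkpoint(path):
    """True when the file exists and is not an obviously truncated download."""
    return os.path.isfile(path) and os.path.getsize(path) > MIN_CKPT_BYTES

# looks_like_full_checkpoint("/content/model.ckpt")
```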
found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\\n\u001b[1;34m')\n", "\n", - " for i in mdls: \n", + " for i in mdls:\n", " print(str(k)+'- '+i)\n", " k=k+1\n", " n=input()\n", " while int(n)>k-1:\n", - " n=input() \n", + " n=input()\n", " if n!=\"000\":\n", " f(int(n))\n", " print('\u001b[1;32mUsing the model '+ mdls[int(n)]+\" ...\")\n", @@ -449,7 +449,7 @@ " print('\u001b[1;32mSkipping the intermediary checkpoints.')\n", " del n\n", "\n", - " \n", + "\n", "if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):\n", " print('\u001b[1;32mLoading session with no previous model, using the original model or the custom downloaded model')\n", " if MODEL_NAME==\"\":\n", @@ -462,7 +462,7 @@ " if Model_Version=='1.5':\n", " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", " !unzip -o -q refmdlz\n", - " !rm -f refmdlz \n", + " !rm -f refmdlz\n", " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", " clear_output()\n", " print('\u001b[1;32mSession found, loading the trained model ...')\n", @@ -478,12 +478,12 @@ " clear_output()\n", " print('\u001b[1;32mSession found, loading the trained model ...')\n", " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1\n", - " !rm /content/convertodiff.py \n", + " !rm /content/convertodiff.py\n", " if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", - " resume=True \n", + " resume=True\n", " clear_output()\n", " print('\u001b[1;32mSession loaded.')\n", - " else: \n", + " else:\n", " if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", " print('\u001b[1;31mConversion error, if the error persists, remove the CKPT file from the current session folder')\n", "\n", @@ -495,7 +495,7 @@ " else:\n", " print('\u001b[1;32mSession created, proceed to uploading instance images')\n", "\n", - " #@markdown \n", + " #@markdown\n", "\n", " #@markdown # The most importent step is to rename the instance pictures of each subject to a unique unknown identifier, example :\n", " #@markdown - If you have 30 pictures of yourself, simply select them all and rename only one to the chosen identifier for example : phtmejhn, the files would be : phtmejhn (1).jpg, phtmejhn (2).png ....etc then upload them, do the same for other people or objects with a different identifier, and that's it.\n", @@ -555,7 +555,7 @@ " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size): \n", + " if file.size !=(Crop_size, Crop_size):\n", " side_length = min(width, height)\n", " left = (width - side_length)/2\n", " top = (height - side_length)/2\n", @@ -573,7 +573,7 @@ " else:\n", " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n", - " \n", + "\n", " print('\\n\u001b[1;32mDone, proceed to the next cell')\n", "\n", "\n", @@ -587,7 +587,7 @@ " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", " file = Image.open(new_path_with_file)\n", " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size): \n", + " if file.size !=(Crop_size, Crop_size):\n", " side_length = min(width, height)\n", " left = (width - side_length)/2\n", " top = 
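The instance and concept upload cells each repeat the same center-crop arithmetic twice (folder path and direct upload). The geometry as one helper, with behavior matching the cells (center square crop, resize to the chosen size, JPEG saved at quality 100); a sketch for clarity, not the notebook's own code:

```python
from PIL import Image

def center_crop_resize(src_path, dst_path, size=512):
    """Center-crop to a square, then resize to size x size, as the cells do."""
    img = Image.open(src_path)
    w, h = img.size
    if (w, h) != (size, size):
        side = min(w, h)
        left, top = (w - side) // 2, (h - side) // 2
        img = img.crop((left, top, left + side, top + side)).resize((size, size))
    ext = dst_path.split(".")[-1].upper()
    if ext == "JPG":
        img.save(dst_path, format="JPEG", quality=100)
    else:
        img.save(dst_path, format=ext)
    return img
```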
(height - side_length)/2\n", @@ -609,7 +609,7 @@ "\n", "with capture.capture_output() as cap:\n", " %cd \"$INSTANCE_DIR\"\n", - " !find . -name \"* *\" -type f | rename 's/ /-/g' \n", + " !find . -name \"* *\" -type f | rename 's/ /-/g'\n", "\n", " %cd $SESSION_DIR\n", " !rm instance_images.zip\n", @@ -653,7 +653,7 @@ "\n", "#@markdown - If you prefer to specify directly the folder of the pictures instead of uploading, this will add the pictures to the existing (if any) concept images. Leave EMPTY to upload.\n", "\n", - "Crop_images= True \n", + "Crop_images= True\n", "Crop_size = \"512\"\n", "Crop_size=int(Crop_size)\n", "\n", @@ -669,7 +669,7 @@ " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size): \n", + " if file.size !=(Crop_size, Crop_size):\n", " side_length = min(width, height)\n", " left = (width - side_length)/2\n", " top = (height - side_length)/2\n", @@ -687,7 +687,7 @@ " else:\n", " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n", - " \n", + "\n", "elif IMAGES_FOLDER_OPTIONAL ==\"\":\n", " uploaded = files.upload()\n", " if Crop_images:\n", @@ -698,7 +698,7 @@ " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", " file = Image.open(new_path_with_file)\n", " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size): \n", + " if file.size !=(Crop_size, Crop_size):\n", " side_length = min(width, height)\n", " left = (width - side_length)/2\n", " top = (height - side_length)/2\n", @@ -716,9 +716,9 @@ " shutil.move(filename, CONCEPT_DIR)\n", " clear_output()\n", "\n", - " \n", + "\n", "print('\\n\u001b[1;32mAlmost done...')\n", - "with capture.capture_output() as cap: \n", + "with capture.capture_output() as cap:\n", " i=0\n", " for filename in os.listdir(CONCEPT_DIR):\n", " extension = filename.split(\".\")[-1]\n", @@ -768,7 +768,7 @@ " %rm -r $INSTANCE_DIR\"/.ipynb_checkpoints\"\n", "\n", "if os.path.exists(CONCEPT_DIR+\"/.ipynb_checkpoints\"):\n", - " %rm -r $CONCEPT_DIR\"/.ipynb_checkpoints\" \n", + " %rm -r $CONCEPT_DIR\"/.ipynb_checkpoints\"\n", "\n", "Resume_Training = False #@param {type:\"boolean\"}\n", "\n", @@ -813,7 +813,7 @@ "if UNet_Training_Steps==0:\n", " trnonltxt=\"--train_only_text_encoder\"\n", "\n", - "Seed='' \n", + "Seed=''\n", "\n", "Style_Training = False #@param {type:\"boolean\"}\n", "\n", @@ -862,7 +862,7 @@ "if os.path.getsize(MODELT_NAME+\"/text_encoder/pytorch_model.bin\") > 670901463:\n", " V2=True\n", "\n", - "Enable_text_encoder_training= True \n", + "Enable_text_encoder_training= True\n", "Enable_Text_Encoder_Concept_Training= True\n", "\n", "if Text_Encoder_Training_Steps==0:\n", @@ -903,7 +903,7 @@ "#@markdown - Auto-disconnect from google colab after the training to avoid wasting compute units.\n", "\n", "def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):\n", - " \n", + "\n", " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", " $trnonltxt \\\n", " --image_captions_filename \\\n", @@ -927,7 +927,7 @@ "def train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps):\n", " clear_output()\n", " if resuming==\"Yes\":\n", - " print('\u001b[1;32mResuming Training...\u001b[0m') \n", + " 
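Both training helpers in this cell wrap an `accelerate launch` of the fork's `train_dreambooth.py`. A sketch of the same invocation built as an argument list instead of a line-continuation shell command; note that `--image_captions_filename` and `--train_only_text_encoder` are flags of TheLastBen's diffusers fork shown in the cell, not necessarily of upstream diffusers:

```python
import subprocess

def launch_dreambooth(script, model_dir, instance_dir, output_dir,
                      steps, precision="fp16", extra_flags=()):
    """Sketch: run the fork's train_dreambooth.py via accelerate."""
    cmd = [
        "accelerate", "launch", script,
        "--image_captions_filename",            # fork-specific flag from the cell
        "--pretrained_model_name_or_path", model_dir,
        "--instance_data_dir", instance_dir,
        "--output_dir", output_dir,
        "--mixed_precision", precision,
        "--max_train_steps", str(steps),
    ] + list(extra_flags)
    subprocess.run(cmd, check=True)

# launch_dreambooth("/content/diffusers/examples/dreambooth/train_dreambooth.py",
#                   MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, 350,
#                   extra_flags=["--train_only_text_encoder"])
```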
print('\u001b[1;32mResuming Training...\u001b[0m')\n", " print('\u001b[1;33mTraining the UNet...\u001b[0m')\n", " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", " $Style \\\n", @@ -960,7 +960,7 @@ "if Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)!=[]:\n", " clear_output()\n", " if resuming==\"Yes\":\n", - " print('\u001b[1;32mResuming Training...\u001b[0m') \n", + " print('\u001b[1;32mResuming Training...\u001b[0m')\n", " print('\u001b[1;33mTraining the text encoder on the concept...\u001b[0m')\n", " dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)\n", "elif Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)==[]:\n", @@ -968,7 +968,7 @@ " time.sleep(8)\n", "if UNet_Training_Steps!=0:\n", " train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps=UNet_Training_Steps)\n", - " \n", + "\n", "\n", "if os.path.exists('/content/models/'+INSTANCE_NAME+'/unet/diffusion_pytorch_model.bin'):\n", " prc=\"--fp16\" if precision==\"fp16\" else \"\"\n", @@ -979,11 +979,11 @@ " clear_output()\n", " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", " if Disconnect_after_training :\n", - " time.sleep(20) \n", - " runtime.unassign() \n", + " time.sleep(20)\n", + " runtime.unassign()\n", " else:\n", - " print(\"\u001b[1;31mSomething went wrong\") \n", - " else: \n", + " print(\"\u001b[1;31mSomething went wrong\")\n", + " else:\n", " !wget -O /content/convertosd.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertosd.py\n", " clear_output()\n", " if precision==\"no\":\n", @@ -993,14 +993,14 @@ " !python /content/convertosd.py\n", " !rm /content/convertosd.py\n", " clear_output()\n", - " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'): \n", + " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n", " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", " if Disconnect_after_training :\n", " time.sleep(20)\n", " runtime.unassign()\n", " else:\n", " print(\"\u001b[1;31mSomething went wrong\")\n", - " \n", + "\n", "else:\n", " print(\"\u001b[1;31mSomething went wrong\")" ] @@ -1018,7 +1018,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "cellView": "form", "id": "iAZGngFcI8hq" }, "outputs": [], @@ -1045,7 +1044,7 @@ "\n", "try:\n", " INSTANCE_NAME\n", - " INSTANCET=INSTANCE_NAME \n", + " INSTANCET=INSTANCE_NAME\n", "except:\n", " pass\n", "#@markdown - if checked, an input box will ask the full path to a desired model.\n", @@ -1070,12 +1069,12 @@ "except:\n", " print('\u001b[1;31mIt seems that you did not perform training during this session \u001b[1;32mor you chose to use a custom path,\\nprovide the full path to the model (including the name of the model):\\n')\n", " path_to_trained_model=input()\n", - " \n", + "\n", "while not os.path.exists(path_to_trained_model):\n", " print(\"\u001b[1;31mThe model doesn't exist on you Gdrive, use the file explorer to get the path : \")\n", " path_to_trained_model=input()\n", "\n", - " \n", + "\n", "with capture.capture_output() as cap:\n", " %cd /content/gdrive/MyDrive/\n", " %mkdir sd\n", @@ -1090,10 +1089,10 @@ " !wget -O /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/shared.py\n", "\n", "if Update_repo:\n", - " with 
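The test cell re-prompts in a `while not os.path.exists(...)` loop until the user pastes a valid model path. The same loop as a reusable helper (hypothetical, shown only to make the control flow explicit):

```python
import os

def ask_until_exists(prompt):
    """Re-prompt until the supplied path exists, mirroring the cell's loop."""
    path = input(prompt)
    while not os.path.exists(path):
        print("The model doesn't exist on your Gdrive, "
              "use the file explorer to get the path:")
        path = input()
    return path

# path_to_trained_model = ask_until_exists("Full path to the model: ")
```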
capture.capture_output() as cap: \n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.sh \n", + " with capture.capture_output() as cap:\n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.sh\n", " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/paths.py\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py \n", + " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", @@ -1102,7 +1101,7 @@ "\n", "\n", "with capture.capture_output() as cap:\n", - " \n", + "\n", " if not os.path.exists('/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion'):\n", " !mkdir /content/gdrive/MyDrive/sd/stablediffusion/src\n", " %cd /content/gdrive/MyDrive/sd/stablediffusion/src\n", @@ -1112,12 +1111,12 @@ " !git clone https://github.com/sczhou/CodeFormer\n", " !git clone https://github.com/crowsonkb/k-diffusion\n", " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CLIP /content/gdrive/MyDrive/sd/stablediffusion/src/clip\n", - " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/BLIP /content/gdrive/MyDrive/sd/stablediffusion/src/blip \n", - " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CodeFormer /content/gdrive/MyDrive/sd/stablediffusion/src/codeformer \n", - " !cp -r /content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion /content/gdrive/MyDrive/sd/stable-diffusion-webui/ \n", + " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/BLIP /content/gdrive/MyDrive/sd/stablediffusion/src/blip\n", + " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CodeFormer /content/gdrive/MyDrive/sd/stablediffusion/src/codeformer\n", + " !cp -r /content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", "\n", "\n", - "with capture.capture_output() as cap: \n", + "with capture.capture_output() as cap:\n", " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules\n", " !wget -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py\n", "\n", @@ -1125,14 +1124,14 @@ " if not os.path.exists('/tools/node/bin/lt'):\n", " !npm install -g localtunnel\n", "\n", - "with capture.capture_output() as cap: \n", + "with capture.capture_output() as cap:\n", " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", " time.sleep(1)\n", " !wget -O webui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.py\n", " !sed -i 's@ui.create_ui().*@ui.create_ui();shared.demo.queue(concurrency_count=999999,status_update_rate=0.1)@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/\n", " !wget -O ui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/ui.py\n", - " !sed -i 's@css = \"\".*@with open(os.path.join(script_path, \"style.css\"), \"r\", encoding=\"utf8\") as file:\\n css = file.read()@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py \n", + " !sed -i 's@css = \"\".*@with open(os.path.join(script_path, \"style.css\"), \"r\", encoding=\"utf8\") as file:\\n css = file.read()@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui\n", " !wget -O style.css 
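The Gradio tunnel setup in this cell rewrites `blocks.py` line by line with `fileinput`, the stdlib's in-place editing idiom: with `inplace=True`, whatever the loop writes to stdout replaces the file's contents. A generic sketch of that pattern:

```python
import fileinput
import sys

def patch_lines(path, replacements):
    """In-place line rewrite, the idiom the cell applies to gradio's blocks.py.

    replacements maps a line-start marker to the full replacement line
    (including trailing newline); unmatched lines pass through unchanged.
    """
    for line in fileinput.input(path, inplace=True):
        for marker, new_line in replacements.items():
            if line.strip().startswith(marker):
                line = new_line
        sys.stdout.write(line)  # redirected into the file by fileinput

# patch_lines("/usr/local/lib/python3.8/dist-packages/gradio/blocks.py",
#             {"self.server_port =": "            self.server_port = 443\n"})
```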
https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/style.css\n", " !sed -i 's@min-height: 4.*@min-height: 5.5em;@g' /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", @@ -1163,7 +1162,7 @@ " line = ' self.server_port = server_port\\n'\n", " sys.stdout.write(line)\n", " clear_output()\n", - " \n", + "\n", "else:\n", " share=''\n", " !nohup lt --port 7860 > srv.txt 2>&1 &\n", @@ -1180,14 +1179,14 @@ " if line.strip().startswith('self.protocol = \"https\"'):\n", " line = ' self.protocol = \"https\"\\n'\n", " if line.strip().startswith('if self.local_url.startswith(\"https\") or self.is_colab'):\n", - " line = '' \n", + " line = ''\n", " if line.strip().startswith('else \"http\"'):\n", - " line = '' \n", + " line = ''\n", " sys.stdout.write(line)\n", - " \n", + "\n", "\n", " !sed -i '13s@.*@ \"PUBLIC_SHARE_TRUE\": \"\u001b[32mConnected\",@' /usr/local/lib/python3.8/dist-packages/gradio/strings.py\n", - " \n", + "\n", " !rm /content/srv.txt\n", " !rm /content/srvr.txt\n", " clear_output()\n", @@ -1209,14 +1208,17 @@ " NM=\"False\"\n", "\n", "if os.path.exists('/usr/local/lib/python3.8/dist-packages/xformers'):\n", - " xformers=\"--xformers\" \n", + " xformers=\"--xformers\"\n", "else:\n", " xformers=\"\"\n", "\n", "if os.path.isfile(path_to_trained_model):\n", - " !python /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py $share --disable-safe-unpickle --no-half-vae --enable-insecure-extension-access --ckpt \"$path_to_trained_model\" $configf $xformers\n", + " ckpt_dir=os.path.dirname(path_to_trained_model)\n", + " ckpt_cfg='--ckpt %s --ckpt-dir %s' % (path_to_trained_model, ckpt_dir)\n", "else:\n", - " !python /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py $share --disable-safe-unpickle --no-half-vae --enable-insecure-extension-access --ckpt-dir \"$path_to_trained_model\" $configf $xformers" + " ckpt_cfg='--ckpt-dir %s' % path_to_trained_model\n", + "\n", + "!python /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py $share --disable-safe-unpickle --no-half-vae --enable-insecure-extension-access $ckpt_cfg $configf $xformers" ] }, { @@ -1225,7 +1227,7 @@ "id": "d_mQ23XsOc5R" }, "source": [ - "# Upload The Trained Model to Hugging Face " + "# Upload The Trained Model to Hugging Face" ] }, { @@ -1254,8 +1256,8 @@ "Name_of_your_concept = \"\" #@param {type:\"string\"}\n", "if(Name_of_your_concept == \"\"):\n", " Name_of_your_concept = Session_Name\n", - "Name_of_your_concept=Name_of_your_concept.replace(\" \",\"-\") \n", - " \n", + "Name_of_your_concept=Name_of_your_concept.replace(\" \",\"-\")\n", + "\n", "Save_concept_to = \"My_Profile\" #@param [\"Public_Library\", \"My_Profile\"]\n", "\n", "#@markdown - [Create a write access token](https://huggingface.co/settings/tokens) , go to \"New token\" -> Role : Write. 
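Before the upload cell below spends minutes pushing folders, the token can be sanity-checked up front: `whoami()` raises on an invalid token. A read token still authenticates here, so a wrong role may only surface at the first commit; this sketch only catches outright bad tokens and uses the same `HfApi` client as the cell:

```python
from huggingface_hub import HfApi

def check_hf_token(token):
    """Fail fast on an invalid token; whoami() raises if the token is bad."""
    api = HfApi()
    name = api.whoami(token=token)["name"]  # same call the cell uses for the username
    print("Authenticated as", name)
    return name

# check_hf_token(hf_token_write)
```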
A regular read token won't work here.\n", @@ -1329,7 +1331,7 @@ " image_string = f'''\n", " {image_string}![{i}](https://huggingface.co/{repo_id}/resolve/main/sample_images/{image})\n", " '''\n", - " \n", + "\n", "readme_text = f'''---\n", "license: creativeml-openrail-m\n", "tags:\n", @@ -1501,7 +1503,7 @@ " clear_output()\n", " print(\"\u001b[1;33mTHE SESSION \u001b[1;31m\"+s.value+\" \u001b[1;33mHAS BEEN REMOVED FROM YOUR GDRIVE\")\n", " !rm -r '/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/{s.value}'\n", - " s.options=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\") \n", + " s.options=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n", " else:\n", " d.close()\n", " s.close()\n", @@ -1535,4 +1537,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 3474401839039dca251c8535dc9ed9fe0becf6f0 Mon Sep 17 00:00:00 2001 From: zuencap <37028435+zuencap@users.noreply.github.com> Date: Tue, 20 Dec 2022 15:08:45 +0100 Subject: [PATCH 3/4] Delete fast_DreamBooth.ipynb --- fast_DreamBooth.ipynb | 1665 ----------------------------------------- 1 file changed, 1665 deletions(-) delete mode 100644 fast_DreamBooth.ipynb diff --git a/fast_DreamBooth.ipynb b/fast_DreamBooth.ipynb deleted file mode 100644 index 06008218..00000000 --- a/fast_DreamBooth.ipynb +++ /dev/null @@ -1,1665 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "qEsNHTtVlbkV" - }, - "source": [ - "# **fast-DreamBooth colab From https://github.com/TheLastBen/fast-stable-diffusion, if you face any issues, feel free to discuss them.**\n", - "Keep your notebook updated for best experience. [Support](https://ko-fi.com/thelastben)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "A4Bae3VP6UsE", - "outputId": "c0347d29-b411-4a46-8ceb-b7ca0081c71a", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Mounted at /content/gdrive\n" - ] - } - ], - "source": [ - "from google.colab import drive\n", - "drive.mount('/content/gdrive')" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "cellView": "form", - "id": "QyvcqeiL65Tj", - "outputId": "61b76d84-6fdf-4da1-eeaa-7ca38365dd5a", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1;32mDONE !\n" - ] - } - ], - "source": [ - "#@markdown # Dependencies\n", - "\n", - "from IPython.utils import capture\n", - "import time\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/\n", - " !pip install -q accelerate==0.12.0\n", - " for i in range(1,6):\n", - " !wget -q \"https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dependencies/Dependencies.{i}\"\n", - " !mv \"Dependencies.{i}\" \"Dependencies.7z.00{i}\"\n", - " !7z x -y Dependencies.7z.001\n", - " time.sleep(2)\n", - " !cp -r /content/usr/local/lib/python3.8/dist-packages /usr/local/lib/python3.8/\n", - " !rm -r /content/usr\n", - " for i in range(1,6):\n", - " !rm \"Dependencies.7z.00{i}\"\n", - " !pip uninstall -y diffusers\n", - " !git clone --branch updt https://github.com/TheLastBen/diffusers\n", - " !pip install -q /content/diffusers\n", - "print('\u001b[1;32mDONE !')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "R3SsbIlxw66N" - }, - "source": [ - "# Model Download" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "cellView": 
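In the upload cell, the README and the .ckpt go up in a single `create_commit` built from `CommitOperationAdd` operations, which keeps the push atomic and the repo history to one commit. The same pattern generalized (a sketch; the file mapping in the usage line is illustrative):

```python
from huggingface_hub import CommitOperationAdd, HfApi

def push_in_one_commit(repo_id, files, message, token):
    """Batch several files into one commit, as the cell does for README + ckpt.

    files maps path_in_repo -> local path (or file object).
    """
    ops = [CommitOperationAdd(path_in_repo=dst, path_or_fileobj=src)
           for dst, src in files.items()]
    HfApi().create_commit(repo_id=repo_id, operations=ops,
                          commit_message=message, token=token)

# push_in_one_commit(repo_id,
#                    {"README.md": "README.md", Session_Name + ".ckpt": MDLPTH},
#                    "Upload the concept", hf_token)
```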
"form", - "id": "O3KHGKqyeJp9", - "outputId": "7a02fbf9-ff65-4c2a-9f7c-57c82f748893", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1;32mDONE !\n" - ] - } - ], - "source": [ - "import os\n", - "import time\n", - "from IPython.display import clear_output\n", - "import wget\n", - "\n", - "#@markdown - Skip this cell if you are loading a previous session\n", - "\n", - "#@markdown ---\n", - "\n", - "Model_Version = \"V2.1-512px\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", - "\n", - "#@markdown - Choose which version to finetune.\n", - "\n", - "#@markdown ---\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/\n", - "\n", - "Huggingface_Token = \"\" #@param {type:\"string\"}\n", - "token=Huggingface_Token\n", - "\n", - "#@markdown - Leave EMPTY if you're using the v2 model.\n", - "#@markdown - Make sure you've accepted the terms in https://huggingface.co/runwayml/stable-diffusion-v1-5\n", - "\n", - "#@markdown ---\n", - "Custom_Model_Version=\"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", - "#@markdown - Choose wisely!\n", - "\n", - "Path_to_HuggingFace= \"\" #@param {type:\"string\"}\n", - "\n", - "\n", - "#@markdown - Load and finetune a model from Hugging Face, must specify if v2, use the format \"profile/model\" like : runwayml/stable-diffusion-v1-5\n", - "\n", - "#@markdown Or\n", - "\n", - "CKPT_Path = \"\" #@param {type:\"string\"}\n", - "\n", - "#@markdown Or\n", - "\n", - "CKPT_Link = \"\" #@param {type:\"string\"}\n", - "\n", - "#@markdown - A CKPT direct link, huggingface CKPT link or a shared CKPT from gdrive.\n", - "#@markdown ---\n", - "\n", - "def downloadmodel():\n", - " token=Huggingface_Token\n", - " if token==\"\":\n", - " token=input(\"Insert your huggingface token :\")\n", - " if os.path.exists('/content/stable-diffusion-v1-5'):\n", - " !rm -r /content/stable-diffusion-v1-5\n", - " clear_output()\n", - "\n", - " %cd /content/\n", - " clear_output()\n", - " !mkdir /content/stable-diffusion-v1-5\n", - " %cd /content/stable-diffusion-v1-5\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{token}@huggingface.co/runwayml/stable-diffusion-v1-5\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " if os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n", - " !git clone \"https://USER:{token}@huggingface.co/stabilityai/sd-vae-ft-mse\"\n", - " !mv /content/stable-diffusion-v1-5/sd-vae-ft-mse /content/stable-diffusion-v1-5/vae\n", - " !rm -r /content/stable-diffusion-v1-5/.git\n", - " %cd /content/stable-diffusion-v1-5\n", - " !rm model_index.json\n", - " time.sleep(1)\n", - " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", - " !sed -i 's@\"clip_sample\": false@@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", - " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json\n", - " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json\n", - " %cd /content/\n", - " clear_output()\n", - " print('\u001b[1;32mDONE !')\n", - " else:\n", - " while not 
os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mMake sure you accepted the terms in https://huggingface.co/runwayml/stable-diffusion-v1-5')\n", - " time.sleep(5)\n", - "\n", - "def newdownloadmodel():\n", - "\n", - " %cd /content/\n", - " clear_output()\n", - " !mkdir /content/stable-diffusion-v2-768\n", - " %cd /content/stable-diffusion-v2-768\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{token}@huggingface.co/stabilityai/stable-diffusion-2-1\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " clear_output()\n", - " print('\u001b[1;32mDONE !')\n", - "\n", - "\n", - "def newdownloadmodelb():\n", - "\n", - " %cd /content/\n", - " clear_output()\n", - " !mkdir /content/stable-diffusion-v2-512\n", - " %cd /content/stable-diffusion-v2-512\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{token}@huggingface.co/stabilityai/stable-diffusion-2-1-base\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " clear_output()\n", - " print('\u001b[1;32mDONE !')\n", - "\n", - "\n", - "if Path_to_HuggingFace != \"\":\n", - " if Custom_Model_Version=='V2.1-512px' or Custom_Model_Version=='V2.1-768px':\n", - " if os.path.exists('/content/stable-diffusion-custom'):\n", - " !rm -r /content/stable-diffusion-custom\n", - " clear_output()\n", - " %cd /content/\n", - " clear_output()\n", - " !mkdir /content/stable-diffusion-custom\n", - " %cd /content/stable-diffusion-custom\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{token}@huggingface.co/{Path_to_HuggingFace}\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nvae\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " !rm -r /content/stable-diffusion-custom/.git\n", - " %cd /content/\n", - " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", - " clear_output()\n", - " print('\u001b[1;32mDONE !')\n", - " else:\n", - " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mCheck the link you provided')\n", - " time.sleep(5)\n", - " else:\n", - " if os.path.exists('/content/stable-diffusion-custom'):\n", - " !rm -r /content/stable-diffusion-custom\n", - " clear_output()\n", - " %cd /content/\n", - " clear_output()\n", - " !mkdir /content/stable-diffusion-custom\n", - " %cd /content/stable-diffusion-custom\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{token}@huggingface.co/{Path_to_HuggingFace}\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"scheduler\\ntext_encoder\\ntokenizer\\nunet\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " !git clone \"https://USER:{token}@huggingface.co/stabilityai/sd-vae-ft-mse\"\n", - " !mv 
/content/stable-diffusion-custom/sd-vae-ft-mse /content/stable-diffusion-custom/vae\n", - " !rm -r /content/stable-diffusion-custom/.git\n", - " %cd /content/stable-diffusion-custom\n", - " !rm model_index.json\n", - " time.sleep(1)\n", - " wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')\n", - " !sed -i 's@\"clip_sample\": false,@@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", - " !sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-custom/scheduler/scheduler_config.json\n", - " !sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-custom/vae/config.json\n", - " %cd /content/\n", - " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", - " clear_output()\n", - " print('\u001b[1;32mDONE !')\n", - " else:\n", - " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mCheck the link you provided')\n", - " time.sleep(5)\n", - "\n", - "elif CKPT_Path !=\"\":\n", - " %cd /content\n", - " clear_output()\n", - " if os.path.exists(str(CKPT_Path)):\n", - " if Custom_Model_Version=='1.5':\n", - " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", - " !unzip -o -q refmdlz\n", - " !rm -f refmdlz\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", - " clear_output()\n", - " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v1\n", - " !rm -r /content/refmdl\n", - " elif Custom_Model_Version=='V2.1-512px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", - " elif Custom_Model_Version=='V2.1-768px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " !python /content/convertodiff.py \"$CKPT_Path\" /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n", - " !rm /content/convertodiff.py\n", - " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " clear_output()\n", - " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", - " print('\u001b[1;32mDONE !')\n", - " else:\n", - " !rm -r /content/stable-diffusion-custom\n", - " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mConversion error')\n", - " time.sleep(5)\n", - " else:\n", - " while not os.path.exists(str(CKPT_Path)):\n", - " print('\u001b[1;31mWrong path, use the colab file explorer to copy the path')\n", - " time.sleep(5)\n", - "\n", - "elif CKPT_Link !=\"\":\n", - " %cd /content\n", - " clear_output()\n", - " !gdown --fuzzy -O model.ckpt $CKPT_Link\n", - " clear_output()\n", - " if os.path.exists('/content/model.ckpt'):\n", - " if os.path.getsize(\"/content/model.ckpt\") > 1810671599:\n", - " if Custom_Model_Version=='1.5':\n", - " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", - " !unzip -o -q refmdlz\n", - " !rm -f refmdlz\n", - " !wget -O convertodiff.py 
https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", - " clear_output()\n", - " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v1\n", - " !rm -r /content/refmdl\n", - " elif Custom_Model_Version=='V2.1-512px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", - " elif Custom_Model_Version=='V2.1-768px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " !python /content/convertodiff.py /content/model.ckpt /content/stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1\n", - " !rm /content/convertodiff.py\n", - " if os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " clear_output()\n", - " MODEL_NAME=\"/content/stable-diffusion-custom\"\n", - " print('\u001b[1;32mDONE !')\n", - " else:\n", - " !rm -r /content/stable-diffusion-custom\n", - " !rm /content/model.ckpt\n", - " while not os.path.exists('/content/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mConversion error')\n", - " time.sleep(5)\n", - " else:\n", - " while os.path.getsize('/content/model.ckpt') < 1810671599:\n", - " print('\u001b[1;31mWrong link, check that the link is valid')\n", - " time.sleep(5)\n", - "\n", - "else:\n", - " if Model_Version==\"1.5\":\n", - " if not os.path.exists('/content/stable-diffusion-v1-5'):\n", - " downloadmodel()\n", - " MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n", - " else:\n", - " MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n", - " print(\"\u001b[1;32mThe v1.5 model already exists, using this model.\")\n", - " elif Model_Version==\"V2.1-512px\":\n", - " if not os.path.exists('/content/stable-diffusion-v2-512'):\n", - " newdownloadmodelb()\n", - " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", - " else:\n", - " MODEL_NAME=\"/content/stable-diffusion-v2-512\"\n", - " print(\"\u001b[1;32mThe v2-512px model already exists, using this model.\")\n", - " elif Model_Version==\"V2.1-768px\":\n", - " if not os.path.exists('/content/stable-diffusion-v2-768'):\n", - " newdownloadmodel()\n", - " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", - " else:\n", - " MODEL_NAME=\"/content/stable-diffusion-v2-768\"\n", - " print(\"\u001b[1;32mThe v2-768px model already exists, using this model.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0tN76Cj5P3RL" - }, - "source": [ - "# Dreambooth" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "cellView": "form", - "id": "A1B299g-_VJo", - "outputId": "8e667545-16d6-488a-c737-2bd3f7f20111", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1;32mCreating session...\n", - "\u001b[1;32mSession created, proceed to uploading instance images\n" - ] - } - ], - "source": [ - "import os\n", - "from IPython.display import clear_output\n", - "from IPython.utils import capture\n", - "from os import listdir\n", - "from os.path import isfile\n", - "import wget\n", - "import time\n", - "\n", - "#@markdown #Create/Load a Session\n", - "\n", - "try:\n", - " 
MODEL_NAME\n", - " pass\n", - "except:\n", - " MODEL_NAME=\"\"\n", - "\n", - "PT=\"\"\n", - "\n", - "Session_Name = \"elzczm\" #@param{type: 'string'}\n", - "while Session_Name==\"\":\n", - " print('\u001b[1;31mInput the Session Name:')\n", - " Session_Name=input('')\n", - "Session_Name=Session_Name.replace(\" \",\"_\")\n", - "\n", - "#@markdown - Enter the session name, it if it exists, it will load it, otherwise it'll create an new session.\n", - "\n", - "Session_Link_optional = \"\" #@param{type: 'string'}\n", - "\n", - "#@markdown - Import a session from another gdrive, the shared gdrive link must point to the specific session's folder that contains the trained CKPT, remove any intermediary CKPT if any.\n", - "\n", - "WORKSPACE='/content/gdrive/MyDrive/Fast-Dreambooth'\n", - "\n", - "if Session_Link_optional !=\"\":\n", - " print('\u001b[1;32mDownloading session...')\n", - "with capture.capture_output() as cap:\n", - " %cd /content\n", - " if Session_Link_optional != \"\":\n", - " if not os.path.exists(str(WORKSPACE+'/Sessions')):\n", - " %mkdir -p $WORKSPACE'/Sessions'\n", - " time.sleep(1)\n", - " %cd $WORKSPACE'/Sessions'\n", - " !gdown --folder --remaining-ok -O $Session_Name $Session_Link_optional\n", - " %cd $Session_Name\n", - " !rm -r instance_images\n", - " !unzip instance_images.zip\n", - " !rm -r concept_images\n", - " !unzip concept_images.zip\n", - " %cd /content\n", - "\n", - "\n", - "INSTANCE_NAME=Session_Name\n", - "OUTPUT_DIR=\"/content/models/\"+Session_Name\n", - "SESSION_DIR=WORKSPACE+'/Sessions/'+Session_Name\n", - "INSTANCE_DIR=SESSION_DIR+'/instance_images'\n", - "CONCEPT_DIR=SESSION_DIR+'/concept_images'\n", - "MDLPTH=str(SESSION_DIR+\"/\"+Session_Name+'.ckpt')\n", - "\n", - "Model_Version = \"1.5\" #@param [ \"1.5\", \"V2.1-512px\", \"V2.1-768px\"]\n", - "#@markdown - Ignore this if you're not loading a previous session that contains a trained model\n", - "\n", - "\n", - "if os.path.exists(str(SESSION_DIR)):\n", - " mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(\".\")[-1]==\"ckpt\"]\n", - " if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):\n", - "\n", - " def f(n):\n", - " k=0\n", - " for i in mdls:\n", - " if k==n:\n", - " !mv \"$SESSION_DIR/$i\" $MDLPTH\n", - " k=k+1\n", - "\n", - " k=0\n", - " print('\u001b[1;33mNo final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\\n\u001b[1;34m')\n", - "\n", - " for i in mdls:\n", - " print(str(k)+'- '+i)\n", - " k=k+1\n", - " n=input()\n", - " while int(n)>k-1:\n", - " n=input()\n", - " if n!=\"000\":\n", - " f(int(n))\n", - " print('\u001b[1;32mUsing the model '+ mdls[int(n)]+\" ...\")\n", - " time.sleep(2)\n", - " else:\n", - " print('\u001b[1;32mSkipping the intermediary checkpoints.')\n", - " del n\n", - "\n", - "\n", - "if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):\n", - " print('\u001b[1;32mLoading session with no previous model, using the original model or the custom downloaded model')\n", - " if MODEL_NAME==\"\":\n", - " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", - " else:\n", - " print('\u001b[1;32mSession Loaded, proceed to uploading instance images')\n", - "\n", - "elif os.path.exists(MDLPTH):\n", - " print('\u001b[1;32mSession found, loading the trained model ...')\n", - " if Model_Version=='1.5':\n", - " !wget -O refmdlz https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz\n", - " !unzip -o -q refmdlz\n", - " !rm -f refmdlz\n", - " 
!wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py\n", - " clear_output()\n", - " print('\u001b[1;32mSession found, loading the trained model ...')\n", - " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v1\n", - " !rm -r /content/refmdl\n", - " elif Model_Version=='V2.1-512px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " print('\u001b[1;32mSession found, loading the trained model ...')\n", - " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1-base\n", - " elif Model_Version=='V2.1-768px':\n", - " !wget -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py\n", - " clear_output()\n", - " print('\u001b[1;32mSession found, loading the trained model ...')\n", - " !python /content/convertodiff.py \"$MDLPTH\" \"$OUTPUT_DIR\" --v2 --reference_model stabilityai/stable-diffusion-2-1\n", - " !rm /content/convertodiff.py\n", - " if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", - " resume=True\n", - " clear_output()\n", - " print('\u001b[1;32mSession loaded.')\n", - " else:\n", - " if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mConversion error, if the error persists, remove the CKPT file from the current session folder')\n", - "\n", - "elif not os.path.exists(str(SESSION_DIR)):\n", - " %mkdir -p \"$INSTANCE_DIR\"\n", - " print('\u001b[1;32mCreating session...')\n", - " if MODEL_NAME==\"\":\n", - " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", - " else:\n", - " print('\u001b[1;32mSession created, proceed to uploading instance images')\n", - "\n", - " #@markdown\n", - "\n", - " #@markdown # The most importent step is to rename the instance pictures of each subject to a unique unknown identifier, example :\n", - " #@markdown - If you have 30 pictures of yourself, simply select them all and rename only one to the chosen identifier for example : phtmejhn, the files would be : phtmejhn (1).jpg, phtmejhn (2).png ....etc then upload them, do the same for other people or objects with a different identifier, and that's it.\n", - " #@markdown - Check out this example : https://i.imgur.com/d2lD3rz.jpeg" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "cellView": "form", - "id": "LC4ukG60fgMy", - "outputId": "486c2ba3-9fe7-4a5d-fa13-395b07c63058", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 90 - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "\r |███████████████| 1/1 Uploaded" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "\u001b[1;32mDone, proceed to the next cell\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "\n" - ] - } - ], - "source": [ - "import shutil\n", - "from google.colab import files\n", - "from PIL import Image\n", - "from tqdm import tqdm\n", - "\n", - "#@markdown #Instance Images\n", - "#@markdown ----\n", - "\n", - "#@markdown\n", - "#@markdown - Run the cell to upload the instance pictures.\n", - "\n", - "Remove_existing_instance_images= True #@param{type: 'boolean'}\n", - "#@markdown - Uncheck the box to keep the existing instance images.\n", - "\n", - "\n", - "if 
Remove_existing_instance_images:\n", - " if os.path.exists(str(INSTANCE_DIR)):\n", - " !rm -r \"$INSTANCE_DIR\"\n", - "\n", - "if not os.path.exists(str(INSTANCE_DIR)):\n", - " %mkdir -p \"$INSTANCE_DIR\"\n", - "\n", - "IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n", - "\n", - "#@markdown - If you prefer to specify directly the folder of the pictures instead of uploading, this will add the pictures to the existing (if any) instance images. Leave EMPTY to upload.\n", - "\n", - "Crop_images= True #@param{type: 'boolean'}\n", - "Crop_size = \"512\" #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"]\n", - "Crop_size=int(Crop_size)\n", - "\n", - "#@markdown - Unless you want to crop them manually in a precise way, you don't need to crop your instance images externally.\n", - "\n", - "while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n", - " print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n", - " IMAGES_FOLDER_OPTIONAL=input('')\n", - "\n", - "if IMAGES_FOLDER_OPTIONAL!=\"\":\n", - " if Crop_images:\n", - " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " extension = filename.split(\".\")[-1]\n", - " identifier=filename.split(\".\")[0]\n", - " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", - " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", - " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size):\n", - " side_length = min(width, height)\n", - " left = (width - side_length)/2\n", - " top = (height - side_length)/2\n", - " right = (width + side_length)/2\n", - " bottom = (height + side_length)/2\n", - " image = file.crop((left, top, right, bottom))\n", - " image = image.resize((Crop_size, Crop_size))\n", - " if (extension.upper() == \"JPG\"):\n", - " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", - " else:\n", - " image.save(new_path_with_file, format=extension.upper())\n", - " else:\n", - " !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n", - "\n", - " else:\n", - " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$INSTANCE_DIR\"\n", - "\n", - " print('\\n\u001b[1;32mDone, proceed to the next cell')\n", - "\n", - "\n", - "elif IMAGES_FOLDER_OPTIONAL ==\"\":\n", - " uploaded = files.upload()\n", - " if Crop_images:\n", - " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " shutil.move(filename, INSTANCE_DIR)\n", - " extension = filename.split(\".\")[-1]\n", - " identifier=filename.split(\".\")[0]\n", - " new_path_with_file = os.path.join(INSTANCE_DIR, filename)\n", - " file = Image.open(new_path_with_file)\n", - " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size):\n", - " side_length = min(width, height)\n", - " left = (width - side_length)/2\n", - " top = (height - side_length)/2\n", - " right = (width + side_length)/2\n", - " bottom = (height + side_length)/2\n", - " image = file.crop((left, top, right, bottom))\n", - " image = image.resize((Crop_size, Crop_size))\n", - " if (extension.upper() == \"JPG\"):\n", - " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", - " else:\n", - " image.save(new_path_with_file, format=extension.upper())\n", - " clear_output()\n", - " else:\n", - " for filename 
in tqdm(uploaded.keys(), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " shutil.move(filename, INSTANCE_DIR)\n", - " clear_output()\n", - "\n", - " print('\\n\u001b[1;32mDone, proceed to the next cell')\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd \"$INSTANCE_DIR\"\n", - " !find . -name \"* *\" -type f | rename 's/ /-/g'\n", - "\n", - " %cd $SESSION_DIR\n", - " !rm instance_images.zip\n", - " !zip -r instance_images instance_images\n", - " %cd /content" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "cellView": "form", - "id": "LxEv3u8mQos3", - "outputId": "44dccae5-02b5-4e65-aa4b-53dcac4133c3", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 646 - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "\r |███████████████| 17/17 Uploaded\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "\u001b[1;32mAlmost done...\n", - "\n", - "\u001b[1;32mDone, proceed to the training cell\n" - ] - } - ], - "source": [ - "import shutil\n", - "from google.colab import files\n", - "from PIL import Image\n", - "from tqdm import tqdm\n", - "\n", - "#@markdown #Concept Images\n", - "#@markdown ----\n", - "\n", - "#@markdown\n", - "#@markdown - Run this `optional` cell to upload concept pictures. If you're training on a specific face, skip this cell.\n", - "#@markdown - Training a model on a restricted number of instance images tends to indoctrinate it and limit its imagination, so concept images help reopen its \"mind\" to diversity and greatly widen the range of possibilities of the output. Concept images should contain anything related to the instance pictures, including objects, ideas, scenes, phenomena and concepts (obviously); don't be afraid to slightly diverge from the trained style. The resolution of the pictures doesn't matter.\n", - "\n", - "Remove_existing_concept_images= True #@param{type: 'boolean'}\n", - "#@markdown - Uncheck the box to keep the existing concept images.\n", - "\n", - "\n", - "if Remove_existing_concept_images:\n", - " if os.path.exists(str(CONCEPT_DIR)):\n", - " !rm -r \"$CONCEPT_DIR\"\n", - "\n", - "if not os.path.exists(str(CONCEPT_DIR)):\n", - " %mkdir -p \"$CONCEPT_DIR\"\n", - "\n", - "IMAGES_FOLDER_OPTIONAL=\"\" #@param{type: 'string'}\n", - "\n", - "#@markdown - If you prefer to directly specify the folder containing the pictures instead of uploading, this will add the pictures to the existing (if any) concept images. 
Leave EMPTY to upload.\n", - "\n", - "Crop_images= True\n", - "Crop_size = \"512\"\n", - "Crop_size=int(Crop_size)\n", - "\n", - "while IMAGES_FOLDER_OPTIONAL !=\"\" and not os.path.exists(str(IMAGES_FOLDER_OPTIONAL)):\n", - " print('\u001b[1;31mThe image folder specified does not exist, use the colab file explorer to copy the path :')\n", - " IMAGES_FOLDER_OPTIONAL=input('')\n", - "\n", - "if IMAGES_FOLDER_OPTIONAL!=\"\":\n", - " if Crop_images:\n", - " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " extension = filename.split(\".\")[-1]\n", - " identifier=filename.split(\".\")[0]\n", - " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", - " file = Image.open(IMAGES_FOLDER_OPTIONAL+\"/\"+filename)\n", - " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size):\n", - " side_length = min(width, height)\n", - " left = (width - side_length)/2\n", - " top = (height - side_length)/2\n", - " right = (width + side_length)/2\n", - " bottom = (height + side_length)/2\n", - " image = file.crop((left, top, right, bottom))\n", - " image = image.resize((Crop_size, Crop_size))\n", - " if (extension.upper() == \"JPG\"):\n", - " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", - " else:\n", - " image.save(new_path_with_file, format=extension.upper())\n", - " else:\n", - " !cp \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n", - "\n", - " else:\n", - " for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " %cp -r \"$IMAGES_FOLDER_OPTIONAL/$filename\" \"$CONCEPT_DIR\"\n", - "\n", - "elif IMAGES_FOLDER_OPTIONAL ==\"\":\n", - " uploaded = files.upload()\n", - " if Crop_images:\n", - " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " shutil.move(filename, CONCEPT_DIR)\n", - " extension = filename.split(\".\")[-1]\n", - " identifier=filename.split(\".\")[0]\n", - " new_path_with_file = os.path.join(CONCEPT_DIR, filename)\n", - " file = Image.open(new_path_with_file)\n", - " width, height = file.size\n", - " if file.size !=(Crop_size, Crop_size):\n", - " side_length = min(width, height)\n", - " left = (width - side_length)/2\n", - " top = (height - side_length)/2\n", - " right = (width + side_length)/2\n", - " bottom = (height + side_length)/2\n", - " image = file.crop((left, top, right, bottom))\n", - " image = image.resize((Crop_size, Crop_size))\n", - " if (extension.upper() == \"JPG\"):\n", - " image.save(new_path_with_file, format=\"JPEG\", quality = 100)\n", - " else:\n", - " image.save(new_path_with_file, format=extension.upper())\n", - " clear_output()\n", - " else:\n", - " for filename in tqdm(uploaded.keys(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):\n", - " shutil.move(filename, CONCEPT_DIR)\n", - " clear_output()\n", - "\n", - "\n", - "print('\\n\u001b[1;32mAlmost done...')\n", - "with capture.capture_output() as cap:\n", - " i=0\n", - " for filename in os.listdir(CONCEPT_DIR):\n", - " extension = filename.split(\".\")[-1]\n", - " identifier=filename.split(\".\")[0]\n", - " new_path_with_file = os.path.join(CONCEPT_DIR, \"conceptimagedb\"+str(i)+\".\"+extension)\n", - " filepath=os.path.join(CONCEPT_DIR,filename)\n", - " !mv \"$filepath\" $new_path_with_file\n", - " i=i+1\n", - "\n", - " %cd $SESSION_DIR\n", - " !rm concept_images.zip\n", - " !zip -r concept_images concept_images\n", - " %cd /content\n", - "\n", - "print('\\n\u001b[1;32mDone, 
proceed to the training cell')" ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZnmQYfZilzY6" - }, - "source": [ - "# Training" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "cellView": "form", - "id": "1-9QbkfAVYYU", - "outputId": "90136696-9ae8-44f3-928f-9c6d517c0603", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\n" - ] - } - ], - "source": [ - "#@markdown ---\n", - "#@markdown #Start DreamBooth\n", - "#@markdown ---\n", - "import os\n", - "from subprocess import getoutput\n", - "from IPython.display import clear_output\n", - "from google.colab import runtime\n", - "import time\n", - "import random\n", - "\n", - "if os.path.exists(INSTANCE_DIR+\"/.ipynb_checkpoints\"):\n", - " %rm -r $INSTANCE_DIR\"/.ipynb_checkpoints\"\n", - "\n", - "if os.path.exists(CONCEPT_DIR+\"/.ipynb_checkpoints\"):\n", - " %rm -r $CONCEPT_DIR\"/.ipynb_checkpoints\"\n", - "\n", - "Resume_Training = True #@param {type:\"boolean\"}\n", - "\n", - "try:\n", - " resume\n", - " if resume and not Resume_Training:\n", - " print('\u001b[1;31mOverwrite your previously trained model? Answering \"yes\" will train a new model, answering \"no\" will resume training the previous model. yes or no?\u001b[0m')\n", - " while True:\n", - " ansres=input('')\n", - " if ansres=='no':\n", - " Resume_Training = True\n", - " del ansres\n", - " break\n", - " elif ansres=='yes':\n", - " Resume_Training = False\n", - " resume= False\n", - " break\n", - "except:\n", - " pass\n", - "\n", - "while not Resume_Training and MODEL_NAME==\"\":\n", - " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", - " time.sleep(5)\n", - "\n", - "#@markdown - If you're not satisfied with the result, check this box and run the cell again; it will continue training the current model.\n", - "\n", - "MODELT_NAME=MODEL_NAME\n", - "\n", - "UNet_Training_Steps=0 #@param{type: 'number'}\n", - "\n", - "#@markdown - Start with 3000 or lower and test the model; if the result isn't good enough, resume training for another 1000 steps and keep testing until you get the desired output. `Set it to 0 to train only the text_encoder`.\n", - "\n", - "Text_Encoder_Training_Steps=0 #@param{type: 'number'}\n", - "\n", - "#@markdown - 350-600 steps are enough for a small dataset; keep this number small to avoid overfitting. Set to 0 to disable. `Set it to 0 before resuming training if it is already trained`.\n", - "\n", - "Text_Encoder_Concept_Training_Steps=1000 #@param{type: 'number'}\n", - "\n", - "#@markdown - Suitable for training a style/concept, as it acts as heavy regularization. Set it to 1500 steps for 200 concept images (you can go higher). Set to 0 to disable. Set both settings above to 0 to finetune only the text_encoder on the concept. `Set it to 0 before resuming training if it is already trained`.\n", - "\n", - "trnonltxt=\"\"\n", - "if UNet_Training_Steps==0:\n", - " trnonltxt=\"--train_only_text_encoder\"\n", - "\n", - "Seed=''\n", - "\n", - "Style_Training = False #@param {type:\"boolean\"}\n", - "\n", - "#@markdown - Further reduces overfitting; suitable when training a style or a general theme. Keep the steps low.\n", - "\n", - "Style=\"\"\n", - "if Style_Training:\n", - " Style=\"--Style\"\n", - "\n", - "Resolution = \"512\" #@param [\"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"]\n", - 
"Res=int(Resolution)\n", - "\n", - "#@markdown - Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n", - "\n", - "fp16 = True\n", - "\n", - "if Seed =='' or Seed=='0':\n", - " Seed=random.randint(1, 999999)\n", - "else:\n", - " Seed=int(Seed)\n", - "\n", - "if fp16:\n", - " prec=\"fp16\"\n", - "else:\n", - " prec=\"no\"\n", - "\n", - "s = getoutput('nvidia-smi')\n", - "if 'A100' in s:\n", - " precision=\"no\"\n", - "else:\n", - " precision=prec\n", - "\n", - "resuming=\"\"\n", - "if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", - " MODELT_NAME=OUTPUT_DIR\n", - " print('\u001b[1;32mResuming Training...\u001b[0m')\n", - " resuming=\"Yes\"\n", - "elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):\n", - " print('\u001b[1;31mPrevious model not found, training a new model...\u001b[0m')\n", - " MODELT_NAME=MODEL_NAME\n", - " while MODEL_NAME==\"\":\n", - " print('\u001b[1;31mNo model found, use the \"Model Download\" cell to download a model.')\n", - " time.sleep(5)\n", - "\n", - "V2=False\n", - "if os.path.getsize(MODELT_NAME+\"/text_encoder/pytorch_model.bin\") > 670901463:\n", - " V2=True\n", - "\n", - "Enable_text_encoder_training= True\n", - "Enable_Text_Encoder_Concept_Training= True\n", - "\n", - "if Text_Encoder_Training_Steps==0:\n", - " Enable_text_encoder_training= False\n", - "else:\n", - " stptxt=Text_Encoder_Training_Steps\n", - "\n", - "if Text_Encoder_Concept_Training_Steps==0:\n", - " Enable_Text_Encoder_Concept_Training= False\n", - "else:\n", - " stptxtc=Text_Encoder_Concept_Training_Steps\n", - "\n", - "\n", - "if Enable_text_encoder_training:\n", - " Textenc=\"--train_text_encoder\"\n", - "else:\n", - " Textenc=\"\"\n", - "\n", - "#@markdown ---------------------------\n", - "Save_Checkpoint_Every_n_Steps = True #@param {type:\"boolean\"}\n", - "Save_Checkpoint_Every=250 #@param{type: 'number'}\n", - "if Save_Checkpoint_Every==None:\n", - " Save_Checkpoint_Every=1\n", - "#@markdown - Minimum 200 steps between each save.\n", - "stp=0\n", - "Start_saving_from_the_step=250 #@param{type: 'number'}\n", - "if Start_saving_from_the_step==None:\n", - " Start_saving_from_the_step=0\n", - "if (Start_saving_from_the_step < 200):\n", - " Start_saving_from_the_step=Save_Checkpoint_Every\n", - "stpsv=Start_saving_from_the_step\n", - "if Save_Checkpoint_Every_n_Steps:\n", - " stp=Save_Checkpoint_Every\n", - "#@markdown - Start saving intermediary checkpoints from this step.\n", - "\n", - "Disconnect_after_training=False #@param {type:\"boolean\"}\n", - "\n", - "#@markdown - Auto-disconnect from google colab after the training to avoid wasting compute units.\n", - "\n", - "def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):\n", - "\n", - " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", - " $trnonltxt \\\n", - " --image_captions_filename \\\n", - " --train_text_encoder \\\n", - " --dump_only_text_encoder \\\n", - " --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n", - " --instance_data_dir=\"$INSTANCE_DIR\" \\\n", - " --output_dir=\"$OUTPUT_DIR\" \\\n", - " --instance_prompt=\"$PT\" \\\n", - " --seed=$Seed \\\n", - " --resolution=512 \\\n", - " --mixed_precision=$precision \\\n", - " --train_batch_size=1 \\\n", - " --gradient_accumulation_steps=1 --gradient_checkpointing \\\n", - " --use_8bit_adam \\\n", - " --learning_rate=2e-6 \\\n", - " 
--lr_scheduler=\"polynomial\" \\\n", - " --lr_warmup_steps=0 \\\n", - " --max_train_steps=$Training_Steps\n", - "\n", - "def train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps):\n", - " clear_output()\n", - " if resuming==\"Yes\":\n", - " print('\u001b[1;32mResuming Training...\u001b[0m')\n", - " print('\u001b[1;33mTraining the UNet...\u001b[0m')\n", - " !accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n", - " $Style \\\n", - " --image_captions_filename \\\n", - " --train_only_unet \\\n", - " --save_starting_step=$stpsv \\\n", - " --save_n_steps=$stp \\\n", - " --Session_dir=$SESSION_DIR \\\n", - " --pretrained_model_name_or_path=\"$MODELT_NAME\" \\\n", - " --instance_data_dir=\"$INSTANCE_DIR\" \\\n", - " --output_dir=\"$OUTPUT_DIR\" \\\n", - " --instance_prompt=\"$PT\" \\\n", - " --seed=$Seed \\\n", - " --resolution=$Res \\\n", - " --mixed_precision=$precision \\\n", - " --train_batch_size=1 \\\n", - " --gradient_accumulation_steps=1 --gradient_checkpointing \\\n", - " --use_8bit_adam \\\n", - " --learning_rate=2e-6 \\\n", - " --lr_scheduler=\"polynomial\" \\\n", - " --lr_warmup_steps=0 \\\n", - " --max_train_steps=$Training_Steps\n", - "\n", - "\n", - "if Enable_text_encoder_training :\n", - " print('\u001b[1;33mTraining the text encoder...\u001b[0m')\n", - " if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):\n", - " %rm -r $OUTPUT_DIR\"/text_encoder_trained\"\n", - " dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)\n", - "if Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)!=[]:\n", - " clear_output()\n", - " if resuming==\"Yes\":\n", - " print('\u001b[1;32mResuming Training...\u001b[0m')\n", - " print('\u001b[1;33mTraining the text encoder on the concept...\u001b[0m')\n", - " dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)\n", - "elif Enable_Text_Encoder_Concept_Training and os.listdir(CONCEPT_DIR)==[]:\n", - " print('\u001b[1;31mNo concept images found, skipping concept training...')\n", - " time.sleep(8)\n", - "if UNet_Training_Steps!=0:\n", - " train_only_unet(stpsv, stp, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, Res, precision, Training_Steps=UNet_Training_Steps)\n", - "\n", - "\n", - "if os.path.exists('/content/models/'+INSTANCE_NAME+'/unet/diffusion_pytorch_model.bin'):\n", - " prc=\"--fp16\" if precision==\"fp16\" else \"\"\n", - " if V2:\n", - " !python /content/diffusers/scripts/convertosdv2.py $prc $OUTPUT_DIR $SESSION_DIR/$Session_Name\".ckpt\"\n", - " clear_output()\n", - " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n", - " clear_output()\n", - " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", - " if Disconnect_after_training :\n", - " time.sleep(20)\n", - " runtime.unassign()\n", - " else:\n", - " print(\"\u001b[1;31mSomething went wrong\")\n", - " else:\n", - " !wget -O /content/convertosd.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertosd.py\n", - " clear_output()\n", - " if precision==\"no\":\n", - " !sed -i '226s@.*@@' /content/convertosd.py\n", - " !sed -i '201s@.*@ model_path = \"{OUTPUT_DIR}\"@' /content/convertosd.py\n", - " !sed -i '202s@.*@ checkpoint_path= \"{SESSION_DIR}/{Session_Name}.ckpt\"@' /content/convertosd.py\n", - " !python /content/convertosd.py\n", - " !rm /content/convertosd.py\n", - " 
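# Note: a sketch (not in the original cell) of a sturdier alternative to the line-number-based
# sed patching of convertosd.py above; hard-coded line numbers (226/201/202) break silently if
# the upstream script gains or loses a line. It assumes only that the script keeps its
# `model_path` / `checkpoint_path` assignments:
#
#   import re
#   with open('/content/convertosd.py') as f:
#       src = f.read()
#   src = re.sub(r'model_path = .*', f'model_path = \"{OUTPUT_DIR}\"', src, count=1)
#   src = re.sub(r'checkpoint_path= .*', f'checkpoint_path= \"{SESSION_DIR}/{Session_Name}.ckpt\"', src, count=1)
#   with open('/content/convertosd.py', 'w') as f:
#       f.write(src)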
clear_output()\n", - " if os.path.exists(SESSION_DIR+\"/\"+INSTANCE_NAME+'.ckpt'):\n", - " print(\"\u001b[1;32mDONE, the CKPT model is in your Gdrive in the sessions folder\")\n", - " if Disconnect_after_training :\n", - " time.sleep(20)\n", - " runtime.unassign()\n", - " else:\n", - " print(\"\u001b[1;31mSomething went wrong\")\n", - "\n", - "else:\n", - " print(\"\u001b[1;31mSomething went wrong\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ehi1KKs-l-ZS" - }, - "source": [ - "# Test The Trained Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "iAZGngFcI8hq", - "outputId": "f9012994-7d17-481d-fdc0-7362318eefaf", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "LatentDiffusion: Running in eps-prediction mode\n", - "DiffusionWrapper has 865.91 M params.\n" - ] - } - ], - "source": [ - "import os\n", - "import time\n", - "import sys\n", - "import fileinput\n", - "from IPython.display import clear_output\n", - "from subprocess import getoutput\n", - "from IPython.utils import capture\n", - "\n", - "\n", - "Model_Version = \"V2.1-512\" #@param [\"1.5\", \"V2.1-512\", \"V2.1-768\"]\n", - "#@markdown - Important! Choose the correct version and resolution of the model\n", - "\n", - "Update_repo = True\n", - "\n", - "Session__Name=\"\" #@param{type: 'string'}\n", - "\n", - "#@markdown - Leave empty if you want to use the current trained model.\n", - "\n", - "Use_Custom_Path = False #@param {type:\"boolean\"}\n", - "\n", - "try:\n", - " INSTANCE_NAME\n", - " INSTANCET=INSTANCE_NAME\n", - "except:\n", - " pass\n", - "#@markdown - If checked, an input box will ask for the full path to the desired model.\n", - "\n", - "if Session__Name!=\"\":\n", - " INSTANCET=Session__Name\n", - " INSTANCET=INSTANCET.replace(\" \",\"_\")\n", - "\n", - "if Use_Custom_Path:\n", - " try:\n", - " INSTANCET\n", - " del INSTANCET\n", - " except:\n", - " pass\n", - "\n", - "try:\n", - " INSTANCET\n", - " if Session__Name!=\"\":\n", - " path_to_trained_model='/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/'+Session__Name+\"/\"+Session__Name+'.ckpt'\n", - " else:\n", - " path_to_trained_model=SESSION_DIR+\"/\"+INSTANCET+'.ckpt'\n", - "except:\n", - " print('\u001b[1;31mIt seems that you did not perform training during this session \u001b[1;32mor you chose to use a custom path,\nprovide the full path to the model (including the name of the model):\n')\n", - " path_to_trained_model=input()\n", - "\n", - "while not os.path.exists(path_to_trained_model):\n", - " print(\"\u001b[1;31mThe model doesn't exist on your Gdrive, use the file explorer to get the path : \")\n", - " path_to_trained_model=input()\n", - "\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/gdrive/MyDrive/\n", - " %mkdir sd\n", - " %cd sd\n", - " !git clone https://github.com/Stability-AI/stablediffusion\n", - " !git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", - " !mkdir -p cache/{huggingface,torch}\n", - " %cd /content/\n", - " !ln -s /content/gdrive/MyDrive/sd/stable-diffusion-webui/cache/huggingface ../root/.cache/\n", - " !ln -s /content/gdrive/MyDrive/sd/stable-diffusion-webui/cache/torch ../root/.cache/\n", - " !wget -O /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/shared.py\n", - "\n", 
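# Note: a minimal idempotent sketch (not in the original cell) of the two `ln -s` cache links
# created in the capture block above; on a second run the shell version fails quietly because
# the links already exist. It assumes only that `os` is imported, as it is at the top of this cell:
#
#   for name in ('huggingface', 'torch'):
#       target = '/content/gdrive/MyDrive/sd/stable-diffusion-webui/cache/' + name
#       link = '/root/.cache/' + name
#       if not os.path.islink(link) and not os.path.exists(link):
#           os.symlink(target, link)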
- "if Update_repo:\n", - " with capture.capture_output() as cap:\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.sh\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/paths.py\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", - " !rm /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", - " print('\u001b[1;32m')\n", - " !git pull\n", - "\n", - "\n", - "with capture.capture_output() as cap:\n", - "\n", - " if not os.path.exists('/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion'):\n", - " !mkdir /content/gdrive/MyDrive/sd/stablediffusion/src\n", - " %cd /content/gdrive/MyDrive/sd/stablediffusion/src\n", - " !git clone https://github.com/CompVis/taming-transformers\n", - " !git clone https://github.com/openai/CLIP\n", - " !git clone https://github.com/salesforce/BLIP\n", - " !git clone https://github.com/sczhou/CodeFormer\n", - " !git clone https://github.com/crowsonkb/k-diffusion\n", - " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CLIP /content/gdrive/MyDrive/sd/stablediffusion/src/clip\n", - " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/BLIP /content/gdrive/MyDrive/sd/stablediffusion/src/blip\n", - " !mv /content/gdrive/MyDrive/sd/stablediffusion/src/CodeFormer /content/gdrive/MyDrive/sd/stablediffusion/src/codeformer\n", - " !cp -r /content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", - "\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules\n", - " !wget -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py\n", - "\n", - "with capture.capture_output() as cap:\n", - " if not os.path.exists('/tools/node/bin/lt'):\n", - " !npm install -g localtunnel\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/\n", - " time.sleep(1)\n", - " !wget -O webui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.py\n", - " !sed -i 's@ui.create_ui().*@ui.create_ui();shared.demo.queue(concurrency_count=999999,status_update_rate=0.1)@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/\n", - " !wget -O ui.py https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/modules/ui.py\n", - " !sed -i 's@css = \"\".*@with open(os.path.join(script_path, \"style.css\"), \"r\", encoding=\"utf8\") as file:\\n css = file.read()@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/ui.py\n", - " %cd /content/gdrive/MyDrive/sd/stable-diffusion-webui\n", - " !wget -O style.css https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/style.css\n", - " !sed -i 's@min-height: 4.*@min-height: 5.5em;@g' /content/gdrive/MyDrive/sd/stable-diffusion-webui/style.css\n", - " !sed -i 's@\"multiple_tqdm\": true,@\\\"multiple_tqdm\": false,@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/config.json\n", - " !sed -i '902s@.*@ self.logvar = self.logvar.to(self.device)@' /content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py\n", - " %cd /content\n", - "\n", - "\n", - "Use_Gradio_Server = True #@param {type:\"boolean\"}\n", - "#@markdown - Only if you have 
trouble connecting to the local server.\n", - "\n", - "Large_Model= False #@param {type:\"boolean\"}\n", - "#@markdown - Check this box if you have trouble loading a model larger than 7GB.\n", - "\n", - "if Large_Model:\n", - " !sed -i 's@cmd_opts.lowram else \\\"cpu\\\"@cmd_opts.lowram else \\\"cuda\\\"@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py\n", - "else:\n", - " !sed -i 's@cmd_opts.lowram else \\\"cuda\\\"@cmd_opts.lowram else \\\"cpu\\\"@' /content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/shared.py\n", - "\n", - "\n", - "share=''\n", - "if Use_Gradio_Server:\n", - " share='--share'\n", - " for line in fileinput.input('/usr/local/lib/python3.8/dist-packages/gradio/blocks.py', inplace=True):\n", - " if line.strip().startswith('self.server_name ='):\n", - " line = '            self.server_name = server_name\\n'\n", - " if line.strip().startswith('self.server_port ='):\n", - " line = '            self.server_port = server_port\\n'\n", - " sys.stdout.write(line)\n", - " clear_output()\n", - "\n", - "else:\n", - " share=''\n", - " !nohup lt --port 7860 > srv.txt 2>&1 &\n", - " time.sleep(2)\n", - " !grep -o 'https[^ ]*' /content/srv.txt >srvr.txt\n", - " time.sleep(2)\n", - " srv= getoutput('cat /content/srvr.txt')\n", - "\n", - " for line in fileinput.input('/usr/local/lib/python3.8/dist-packages/gradio/blocks.py', inplace=True):\n", - " if line.strip().startswith('self.server_name ='):\n", - " line = f'            self.server_name = \"{srv[8:]}\"\\n'\n", - " if line.strip().startswith('self.server_port ='):\n", - " line = '            self.server_port = 443\\n'\n", - " if line.strip().startswith('self.protocol = \"https\"'):\n", - " line = '            self.protocol = \"https\"\\n'\n", - " if line.strip().startswith('if self.local_url.startswith(\"https\") or self.is_colab'):\n", - " line = ''\n", - " if line.strip().startswith('else \"http\"'):\n", - " line = ''\n", - " sys.stdout.write(line)\n", - "\n", - "\n", - " !sed -i '13s@.*@    \"PUBLIC_SHARE_TRUE\": \"\u001b[32mConnected\",@' /usr/local/lib/python3.8/dist-packages/gradio/strings.py\n", - "\n", - " !rm /content/srv.txt\n", - " !rm /content/srvr.txt\n", - " clear_output()\n", - "\n", - "with capture.capture_output() as cap:\n", - " %cd /content/gdrive/MyDrive/sd/stablediffusion/\n", - "\n", - "if Model_Version == \"V2.1-768\":\n", - " configf=\"--config /content/gdrive/MyDrive/sd/stablediffusion/configs/stable-diffusion/v2-inference-v.yaml\"\n", - " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cuda\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", - " NM=\"True\"\n", - "elif Model_Version == \"V2.1-512\":\n", - " configf=\"--config /content/gdrive/MyDrive/sd/stablediffusion/configs/stable-diffusion/v2-inference.yaml\"\n", - " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cuda\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", - " NM=\"True\"\n", - "else:\n", - " configf=\"\"\n", - " !sed -i 's@def load_state_dict(checkpoint_path: str, map_location.*@def load_state_dict(checkpoint_path: str, map_location=\"cpu\"):@' /usr/local/lib/python3.8/dist-packages/open_clip/factory.py\n", - " NM=\"False\"\n", - "\n", - "if os.path.exists('/usr/local/lib/python3.8/dist-packages/xformers'):\n", - " xformers=\"--xformers\"\n", - "else:\n", - " xformers=\"\"\n", - "\n", - "if os.path.isfile(path_to_trained_model):\n", - " ckpt_dir=os.path.dirname(path_to_trained_model)\n", - " ckpt_cfg='--ckpt 
%s --ckpt-dir %s' % (path_to_trained_model, ckpt_dir)\n", - "else:\n", - " ckpt_cfg='--ckpt-dir %s' % path_to_trained_model\n", - "\n", - "!python /content/gdrive/MyDrive/sd/stable-diffusion-webui/webui.py $share --disable-safe-unpickle --no-half-vae --enable-insecure-extension-access $ckpt_cfg $configf $xformers" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d_mQ23XsOc5R" - }, - "source": [ - "# Upload The Trained Model to Hugging Face" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "NTqUIuhROdH4" - }, - "outputs": [], - "source": [ - "from slugify import slugify\n", - "from huggingface_hub import HfApi, HfFolder, CommitOperationAdd\n", - "from huggingface_hub import create_repo\n", - "from IPython.display import display_markdown\n", - "from IPython.display import clear_output\n", - "from IPython.utils import capture\n", - "from google.colab import files\n", - "import shutil\n", - "import time\n", - "import os\n", - "\n", - "Upload_sample_images = False #@param {type:\"boolean\"}\n", - "#@markdown - Upload showcase images of your trained model\n", - "\n", - "Name_of_your_concept = \"\" #@param {type:\"string\"}\n", - "if(Name_of_your_concept == \"\"):\n", - " Name_of_your_concept = Session_Name\n", - "Name_of_your_concept=Name_of_your_concept.replace(\" \",\"-\")\n", - "\n", - "Save_concept_to = \"My_Profile\" #@param [\"Public_Library\", \"My_Profile\"]\n", - "\n", - "#@markdown - [Create a write access token](https://huggingface.co/settings/tokens) , go to \"New token\" -> Role : Write. A regular read token won't work here.\n", - "hf_token_write = \"\" #@param {type:\"string\"}\n", - "if hf_token_write ==\"\":\n", - " print('\u001b[1;32mYour Hugging Face write access token : ')\n", - " hf_token_write=input()\n", - "\n", - "hf_token = hf_token_write\n", - "\n", - "api = HfApi()\n", - "your_username = api.whoami(token=hf_token)[\"name\"]\n", - "\n", - "if(Save_concept_to == \"Public_Library\"):\n", - " repo_id = f\"sd-dreambooth-library/{slugify(Name_of_your_concept)}\"\n", - " #Join the Concepts Library organization if you aren't part of it already\n", - " !curl -X POST -H 'Authorization: Bearer '$hf_token -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX\n", - "else:\n", - " repo_id = f\"{your_username}/{slugify(Name_of_your_concept)}\"\n", - "output_dir = f'/content/models/'+INSTANCE_NAME\n", - "\n", - "def bar(prg):\n", - " br=\"\u001b[1;33mUploading to HuggingFace : \" '\u001b[0m|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ \"%\"\n", - " return br\n", - "\n", - "print(\"\u001b[1;32mLoading...\")\n", - "\n", - "NM=\"False\"\n", - "if os.path.getsize(OUTPUT_DIR+\"/text_encoder/pytorch_model.bin\") > 670901463:\n", - " NM=\"True\"\n", - "\n", - "\n", - "if NM==\"False\":\n", - " with capture.capture_output() as cap:\n", - " %cd $OUTPUT_DIR\n", - " !rm -r safety_checker feature_extractor .git\n", - " !rm model_index.json\n", - " !git init\n", - " !git lfs install --system --skip-repo\n", - " !git remote add -f origin \"https://USER:{hf_token}@huggingface.co/runwayml/stable-diffusion-v1-5\"\n", - " !git config core.sparsecheckout true\n", - " !echo -e \"feature_extractor\\nsafety_checker\\nmodel_index.json\" > .git/info/sparse-checkout\n", - " !git pull origin main\n", - " !rm -r .git\n", - " %cd /content\n", - "\n", - "image_string = \"\"\n", - "\n", - "if os.path.exists('/content/sample_images'):\n", - " !rm -r 
/content/sample_images\n", - "Samples=\"/content/sample_images\"\n", - "!mkdir $Samples\n", - "clear_output()\n", - "\n", - "if Upload_sample_images:\n", - "\n", - " print(\"\u001b[1;32mUpload Sample images of the model\")\n", - " uploaded = files.upload()\n", - " for filename in uploaded.keys():\n", - " shutil.move(filename, Samples)\n", - " %cd $Samples\n", - " !find . -name \"* *\" -type f | rename 's/ /_/g'\n", - " %cd /content\n", - " clear_output()\n", - "\n", - " print(bar(1))\n", - "\n", - " images_upload = os.listdir(Samples)\n", - " instance_prompt_list = []\n", - " for i, image in enumerate(images_upload):\n", - " image_string = f'''\n", - " {image_string}![{i}](https://huggingface.co/{repo_id}/resolve/main/sample_images/{image})\n", - " '''\n", - "\n", - "readme_text = f'''---\n", - "license: creativeml-openrail-m\n", - "tags:\n", - "- text-to-image\n", - "- stable-diffusion\n", - "---\n", - "### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)[\"name\"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook\n", - "\n", - "\n", - "Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)\n", - "Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)\n", - "\n", - "Sample pictures of this concept:\n", - "{image_string}\n", - "'''\n", - "#Save the readme to a file\n", - "readme_file = open(\"README.md\", \"w\")\n", - "readme_file.write(readme_text)\n", - "readme_file.close()\n", - "\n", - "operations = [\n", - " CommitOperationAdd(path_in_repo=\"README.md\", path_or_fileobj=\"README.md\"),\n", - " CommitOperationAdd(path_in_repo=f\"{Session_Name}.ckpt\",path_or_fileobj=MDLPTH)\n", - "\n", - "]\n", - "create_repo(repo_id,private=True, token=hf_token)\n", - "\n", - "api.create_commit(\n", - " repo_id=repo_id,\n", - " operations=operations,\n", - " commit_message=f\"Upload the concept {Name_of_your_concept} embeds and token\",\n", - " token=hf_token\n", - ")\n", - "\n", - "if NM==\"False\":\n", - " api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/feature_extractor\",\n", - " path_in_repo=\"feature_extractor\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - " )\n", - "\n", - "clear_output()\n", - "print(bar(4))\n", - "\n", - "if NM==\"False\":\n", - " api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/safety_checker\",\n", - " path_in_repo=\"safety_checker\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - " )\n", - "\n", - "clear_output()\n", - "print(bar(8))\n", - "\n", - "\n", - "api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/scheduler\",\n", - " path_in_repo=\"scheduler\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(9))\n", - "\n", - "api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/text_encoder\",\n", - " path_in_repo=\"text_encoder\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(12))\n", - "\n", - "api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/tokenizer\",\n", - " path_in_repo=\"tokenizer\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(13))\n", - "\n", - "api.upload_folder(\n", - " 
folder_path=OUTPUT_DIR+\"/unet\",\n", - " path_in_repo=\"unet\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(21))\n", - "\n", - "api.upload_folder(\n", - " folder_path=OUTPUT_DIR+\"/vae\",\n", - " path_in_repo=\"vae\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(23))\n", - "\n", - "api.upload_file(\n", - " path_or_fileobj=OUTPUT_DIR+\"/model_index.json\",\n", - " path_in_repo=\"model_index.json\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(24))\n", - "\n", - "api.upload_folder(\n", - " folder_path=Samples,\n", - " path_in_repo=\"sample_images\",\n", - " repo_id=repo_id,\n", - " token=hf_token\n", - ")\n", - "\n", - "clear_output()\n", - "print(bar(25))\n", - "\n", - "display_markdown(f'''## Your concept was saved successfully. [Click here to access it](https://huggingface.co/{repo_id})\n", - "''', raw=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "iVqNi8IDzA1Z" - }, - "outputs": [], - "source": [ - "#@markdown #Free Gdrive Space\n", - "\n", - "#@markdown Display the list of sessions from your gdrive and choose which ones to remove.\n", - "\n", - "import ipywidgets as widgets\n", - "\n", - "Sessions=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n", - "\n", - "s = widgets.Select(\n", - "  options=Sessions,\n", - "  rows=5,\n", - "  description='',\n", - "  disabled=False\n", - ")\n", - "\n", - "out=widgets.Output()\n", - "\n", - "d = widgets.Button(\n", - " description='Remove',\n", - " disabled=False,\n", - " button_style='warning',\n", - " tooltip='Remove the selected session',\n", - " icon='warning'\n", - ")\n", - "\n", - "def rem(d):\n", - " with out:\n", - "  if s.value is not None:\n", - "   clear_output()\n", - "   print(\"\u001b[1;33mTHE SESSION \u001b[1;31m\"+s.value+\" \u001b[1;33mHAS BEEN REMOVED FROM YOUR GDRIVE\")\n", - "   !rm -r '/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/{s.value}'\n", - "   s.options=os.listdir(\"/content/gdrive/MyDrive/Fast-Dreambooth/Sessions\")\n", - "  else:\n", - "   d.close()\n", - "   s.close()\n", - "   clear_output()\n", - "   print(\"\u001b[1;32mNOTHING TO REMOVE\")\n", - "\n", - "d.on_click(rem)\n", - "if s.value is not None:\n", - " display(s,d,out)\n", - "else:\n", - " print(\"\u001b[1;32mNOTHING TO REMOVE\")" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [ - "bbKbx185zqlz", - "AaLtXBbPleBr" - ], - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file From 5287cf7bf202770c4bf100cbd8743e3567710a41 Mon Sep 17 00:00:00 2001 From: zuencap <37028435+zuencap@users.noreply.github.com> Date: Tue, 20 Dec 2022 15:16:41 +0100 Subject: [PATCH 4/4] Revert cellView change --- fast-DreamBooth.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/fast-DreamBooth.ipynb b/fast-DreamBooth.ipynb index 0d9031a1..4ac4b918 100644 --- a/fast-DreamBooth.ipynb +++ b/fast-DreamBooth.ipynb @@ -1018,6 +1018,7 @@ "cell_type": "code", "execution_count": null, "metadata": { + "cellView": "form", "id": "iAZGngFcI8hq" }, "outputs": [],
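A note on the ckpt-dir change introduced by this series: when `path_to_trained_model` points at a single .ckpt file, the launch command now passes both the file (`--ckpt`) and its parent folder (`--ckpt-dir`), so the intermediate checkpoints that the training cell saves alongside it (one every `Save_Checkpoint_Every` steps) appear in the webui checkpoint dropdown and can be switched between. A minimal sketch of that selection logic, using a hypothetical session path:

    import os

    # Hypothetical example; in the notebook this path comes from the session setup.
    path_to_trained_model = '/content/gdrive/MyDrive/Fast-Dreambooth/Sessions/mysession/mysession.ckpt'

    if os.path.isfile(path_to_trained_model):
        # A single checkpoint: load it, and also expose its folder so the webui
        # can list the intermediate saves stored next to it.
        ckpt_dir = os.path.dirname(path_to_trained_model)
        ckpt_cfg = '--ckpt %s --ckpt-dir %s' % (path_to_trained_model, ckpt_dir)
    else:
        # A directory was given: let the webui scan it for checkpoints.
        ckpt_cfg = '--ckpt-dir %s' % path_to_trained_model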