Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Includes 30 custom nodes committed directly, 7 Civitai-exclusive LoRAs stored via Git LFS, and a setup script that installs all dependencies and downloads the HuggingFace-hosted models on vast.ai.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
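For reference, a vast.ai provisioning script along the lines described above might look like the sketch below. This is an illustrative outline only, not the committed script; the repository URL is a placeholder, and the example checkpoint URL is taken from the bundled Colab notebook.

```
#!/usr/bin/env bash
# Illustrative vast.ai provisioning sketch (not the committed setup script).
# Assumes the instance has Python + pip, and that <your-fork-url> is this repository.
set -euo pipefail

WORKSPACE=/workspace/ComfyUI

# Clone the repo; the vendored custom nodes and LFS-tracked loras come with it
apt-get update && apt-get install -y git git-lfs wget
git lfs install
git clone <your-fork-url> "$WORKSPACE"
cd "$WORKSPACE"
git lfs pull   # fetch the Civitai-exclusive loras stored via Git LFS

# Python dependencies for ComfyUI and every vendored custom node
pip install -r requirements.txt
for req in custom_nodes/*/requirements.txt; do
    pip install -r "$req"
done

# HuggingFace-hosted models (checkpoint URL taken from the bundled notebook)
wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors \
    -P "$WORKSPACE/models/checkpoints/"
```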
custom_nodes/was-node-suite-comfyui/.gitignore (new file, vendored, 166 lines)
@@ -0,0 +1,166 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
*.code-workspace

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
*.pyc

# Custom
was_suite_settings.json
styles.json
was_suite_config.json
workflows/
was_history.json
nsp_pantry.json
cache/
*.latent
*.image
*.conditioning
custom_nodes/was-node-suite-comfyui/ComfyUI_+_WAS_Node_Suite_and_ComfyUI_Manager.ipynb (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/WASasquatch/was-node-suite-comfyui/blob/main/ComfyUI_%2B_WAS_Node_Suite_and_ComfyUI_Manager.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "aaaaaaaaaa"
|
||||
},
|
||||
"source": [
|
||||
"# <font color=\"#0497e0\">**Comfy**</font>UI + <font color=\"#4684a3\">**WAS** Node Suite</font> [](https://hits.seeyoufarm.com)\n",
|
||||
"A version of ComfyUI Colab with WAS Node Suite installatoin."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "bbbbbbbbbb",
|
||||
"cellView": "form"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#@title <font color=\"orange\" size=\"5\">Environment Setup</font>\n",
|
||||
"#@markdown <font color=\"#0497e0\">Download and install ComfyUI + WAS Node Suite. You can run this cell again with the `UPDATE_COMFY_UI` or `UPDATE_WAS_NS` options selected to update.\n",
|
||||
"\n",
|
||||
"from pathlib import Path\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"OPTIONS = {}\n",
|
||||
"\n",
|
||||
"#@markdown <font color=\"#48768c\">Store ComfyUI on Google Drive instead of Colab</font>\n",
|
||||
"USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n",
|
||||
"#markdown <font color=\"#48768c\">Update ComfyUI</font>\n",
|
||||
"UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n",
|
||||
"#@markdown <font color=\"#48768c\">Update WAS Node Suite</font>\n",
|
||||
"UPDATE_WAS_NS = True #@param {type:\"boolean\"}\n",
|
||||
"#@markdown <font color=\"#48768c\">Update Pillow for WAS NS:</font>\n",
|
||||
"UPDATE_PILLOW = False #@param {type:\"boolean\"}\n",
|
||||
"#@markdown <font color=\"#48768c\">ComfyUI Manager:</font>\n",
|
||||
"USE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n",
|
||||
"UPDATE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"WORKSPACE = '/content/ComfyUI'\n",
|
||||
"OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n",
|
||||
"OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n",
|
||||
"\n",
|
||||
"if USE_GOOGLE_DRIVE:\n",
|
||||
" !echo \"Mounting Google Drive...\"\n",
|
||||
" %cd /\n",
|
||||
" from google.colab import drive\n",
|
||||
" drive.mount('/content/drive')\n",
|
||||
" WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n",
|
||||
" %cd /content/drive/MyDrive\n",
|
||||
"\n",
|
||||
"![ ! -d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI $WORKSPACE\n",
|
||||
"%cd $WORKSPACE\n",
|
||||
"\n",
|
||||
"if UPDATE_COMFY_UI:\n",
|
||||
" !echo -= Updating ComfyUI =-\n",
|
||||
" !git pull\n",
|
||||
"\n",
|
||||
"!echo -= Install dependencies =-\n",
|
||||
"!pip install xformers -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118\n",
|
||||
"!git clone https://github.com/WASasquatch/was-node-suite-comfyui $WORKSPACE/custom_nodes/was-node-suite-comfyui\n",
|
||||
"\n",
|
||||
"if USE_COMFYUI_MANAGER:\n",
|
||||
" !git clone https://github.com/ltdrdata/ComfyUI-Manager.git $WORKSPACE/custom_nodes/ComfyUI-Manager\n",
|
||||
"\n",
|
||||
"if UPDATE_WAS_NS:\n",
|
||||
" %cd $WORKSPACE/custom_nodes/was-node-suite-comfyui\n",
|
||||
" !git pull\n",
|
||||
" %cd $WORKSPACE\n",
|
||||
"\n",
|
||||
"if UPDATE_COMFYUI_MANAGER:\n",
|
||||
" %cd $WORKSPACE/custom_nodes/ComfyUI-Manager\n",
|
||||
" !git pull\n",
|
||||
" %cd $WORKSPACE\n",
|
||||
"\n",
|
||||
"if UPDATE_PILLOW:\n",
|
||||
" !pip install --upgrade --force-reinstall pillow\n",
|
||||
" print('\\n\\033[91m\\033[1mRestarting runtime for Pillow Update. Run this cell again without `UPDATE_PILLOW` selected!\\033[0m')\n",
|
||||
" time.sleep(5)\n",
|
||||
"\n",
|
||||
" import os\n",
|
||||
" os.kill(os.getpid(), 9)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cccccccccc"
|
||||
},
|
||||
"source": [
|
||||
"Download some models/checkpoints/vae or custom comfyui nodes (uncomment the commands for the ones you want)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "dddddddddd",
|
||||
"cellView": "form"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#@title <font color=\"orange\" size=\"5\">Download Models</font>\n",
|
||||
"#@markdown <font color=\"#0497e0\">Download models and other resources to use in ComfyUI. Select your options and run the this cell. Can run multiple times with different options.</font>\n",
|
||||
"\n",
|
||||
"#@markdown ---\n",
|
||||
"\n",
|
||||
"# Checkpoints\n",
|
||||
"\n",
|
||||
"MODEL_OPTION = 'stable-diffusion-xl-base-1.0.safetensors' #@param['None', 'stable-diffusion-xl-base-1.0.safetensors', 'sd_xl_base_1.0_0.9vae.safetensors', 'sd_xl_base_0.9.safetensors', 'v1-5-pruned-emaonly.ckpt', 'v2-1_512-ema-pruned.safetensors', 'v2-1_768-ema-pruned.safetensors', 'AbyssOrangeMix2_hard.safetensors', 'AOM3A1_orangemixs.safetensors', 'AOM3A3_orangemixs.safetensors', 'wd-1-5-beta2-fp16.safetensors']\n",
|
||||
"VAE_OPTION = 'vae-ft-mse-840000-ema-pruned.safetensors' #@param['None', 'vae-ft-mse-840000-ema-pruned.safetensors', 'orangemix.vae.pt', 'kl-f8-anime2.ckpt']\n",
|
||||
"UPSCALE_MODEL_OPTION = 'None' #@param['None', 'RealESRGAN_x2.pth', 'RealESRGAN_x4.pth', '4x-UltraSharp', '4x_RealisticRescaler_100000_G.pth', 'BSRGAN.pth', 'BSRGANx2.pth']\n",
|
||||
"LORA_OPTION = 'None' #@param['None', 'theovercomer8sContrastFix_sd21768.safetensors', 'theovercomer8sContrastFix_sd15.safetensors']\n",
|
||||
"T2I_OPTION = 'None' #@param['None', 't2iadapter_depth_sd14v1.pth', 't2iadapter_seg_sd14v1.pth', 't2iadapter_sketch_sd14v1.pth', 't2iadapter_keypose_sd14v1.pth', 't2iadapter_openpose_sd14v1.pth', 't2iadapter_color_sd14v1.pth', 't2iadapter_canny_sd14v1.pth', '/t2iadapter_style_sd14v1.pth']\n",
|
||||
"CONTROLNET_OPTION = 'None' #@param['None', 'control_depth-fp16.safetensors', 'control_scribble-fp16.safetensors', 'control_openpose-fp16.safetensors']\n",
|
||||
"\n",
|
||||
"#@markdown ---\n",
|
||||
"\n",
|
||||
"#@markdown <font color=\"#0497e0\">**Download and instlal CLIPVision**:</font>\n",
|
||||
"DOWNLOAD_CLIPVISION = False #@param {type:\"boolean\"}\n",
|
||||
"#@markdown <font color=\"#0497e0\">**ControlNet Preprocessor Nodes** by Fannovel16:</font>\n",
|
||||
"INSTALL_CONTROLNET_NODES = False #@param {type:\"boolean\"}\n",
|
||||
"\n",
|
||||
"# SDXL\n",
|
||||
"if MODEL_OPTION == 'stable-diffusion-xl-base-1.0.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'sd_xl_base_1.0_0.9vae.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'sd_xl_base_0.9.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/sd_xl_base_0.9.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# SD1.5\n",
|
||||
"if MODEL_OPTION == 'v1-5-pruned-emaonly.ckpt':\n",
|
||||
" !wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P $WORKSPACE/models/checkpoints/\n",
|
||||
"\n",
|
||||
"# SD2\n",
|
||||
"if MODEL_OPTION == 'v2-1_512-ema-pruned.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'v2-1_768-ema-pruned.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"\n",
|
||||
"# Some SD1.5 anime style\n",
|
||||
"if MODEL_OPTION == 'AbyssOrangeMix2_hard.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'AOM3A1_orangemixs.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'AOM3A3_orangemixs.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"if MODEL_OPTION == 'anything-v3-fp16-pruned.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"\n",
|
||||
"# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n",
|
||||
"if MODEL_OPTION == 'wd-1-5-beta2-fp16.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta2/resolve/main/checkpoints/wd-1-5-beta2-fp16.safetensors -P $WORKSPACE/models/checkpoints/\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# VAE\n",
|
||||
"if VAE_OPTION == 'vae-ft-mse-840000-ema-pruned.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors -P $WORKSPACE/models/vae/\n",
|
||||
"if VAE_OPTION == 'orangemix.vae.pt':\n",
|
||||
" !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt -P $WORKSPACE/models/vae/'\n",
|
||||
"if VAE_OPTION == 'kl-f8-anime2.ckpt':\n",
|
||||
" !wget -c https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt -P $WORKSPACE/models/vae/\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Loras\n",
|
||||
"if LORA_OPTION == 'theovercomer8sContrastFix_sd21768.safetensors':\n",
|
||||
" !wget -c https://civitai.com/api/download/models/10350 -O $WORKSPACE/models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n",
|
||||
"if LORA_OPTION == 'theovercomer8sContrastFix_sd15.safetensors':\n",
|
||||
" !wget -c https://civitai.com/api/download/models/10638 -O $WORKSPACE/models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# T2I-Adapter\n",
|
||||
"if T2I_OPTION == 't2iadapter_depth_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_seg_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_sketch_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_keypose_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_openpose_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_color_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"if T2I_OPTION == 't2iadapter_canny_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P $WORKSPACE/models/controlnet/\n",
|
||||
"\n",
|
||||
"# T2I Styles Model\n",
|
||||
"if T2I_OPTION == '/t2iadapter_style_sd14v1.pth':\n",
|
||||
" !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P $WORKSPACE/models/style_models/\n",
|
||||
"\n",
|
||||
"# CLIPVision model (needed for styles model)\n",
|
||||
"if DOWNLOAD_CLIPVISION:\n",
|
||||
" !wget -c https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin -O $WORKSPACE/models/clip_vision/clip_vit14.bin\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# ControlNet\n",
|
||||
"if CONTROLNET_OPTION == 'control_depth-fp16.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors -P $WORKSPACE/models/controlnet/\n",
|
||||
"if CONTROLNET_OPTION == 'control_scribble-fp16.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors -P $WORKSPACE/models/controlnet/\n",
|
||||
"if CONTROLNET_OPTION == 'control_openpose-fp16.safetensors':\n",
|
||||
" !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors -P $WORKSPACE/models/controlnet/\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Controlnet Preprocessor nodes by Fannovel16\n",
|
||||
"if INSTALL_CONTROLNET_NODES:\n",
|
||||
" !cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n",
|
||||
"\n",
|
||||
"# ESRGAN upscale model\n",
|
||||
"if UPSCALE_MODEL_OPTION == 'RealESRGAN_x2.pth':\n",
|
||||
" !wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"if UPSCALE_MODEL_OPTION == 'RealESRGAN_x4.pth':\n",
|
||||
" !wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"if UPSCALE_MODEL_OPTION == '4x-UltraSharp':\n",
|
||||
" !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x-UltraSharp.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"if UPSCALE_MODEL_OPTION == '4x_RealisticRescaler_100000_G.pth':\n",
|
||||
" !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x_RealisticRescaler_100000_G.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"if UPSCALE_MODEL_OPTION == 'BSRGAN.pth':\n",
|
||||
" !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/BSRGAN.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"if UPSCALE_MODEL_OPTION == 'BSRGANx2.pth':\n",
|
||||
" !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/BSRGANx2.pth -P $WORKSPACE/models/upscale_models/\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"#@title <font size=\"5\" color=\"orange\">Direct Download Models</font>\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"types = {\n",
|
||||
" 'CHECKPOINTS': os.path.join(WORKSPACE, 'models/checkpoints'),\n",
|
||||
" 'CLIP': os.path.join(WORKSPACE, 'models/clip'),\n",
|
||||
" 'CLIP_VISION': os.path.join(WORKSPACE, 'models/clip_vision'),\n",
|
||||
" 'CONFIGS': os.path.join(WORKSPACE, 'models/configs'),\n",
|
||||
" 'CONTROLNET': os.path.join(WORKSPACE, 'models/controlnet'),\n",
|
||||
" 'DIFFUSERS': os.path.join(WORKSPACE, 'models/diffusers'),\n",
|
||||
" 'EMBEDDINGS': os.path.join(WORKSPACE, 'models/embeddings'),\n",
|
||||
" 'GLIGEN': os.path.join(WORKSPACE, 'models/gligen'),\n",
|
||||
" 'HYPERNETWORKS': os.path.join(WORKSPACE, 'models/hypernetworks'),\n",
|
||||
" 'LORAS': os.path.join(WORKSPACE, 'models/loras'),\n",
|
||||
" 'STYLE_MODEL': os.path.join(WORKSPACE, 'models/style_models'),\n",
|
||||
" 'UNET': os.path.join(WORKSPACE, 'models/unet'),\n",
|
||||
" 'UPSCALE_MODELS': os.path.join(WORKSPACE, 'models/upscale_models'),\n",
|
||||
" 'VAE': os.path.join(WORKSPACE, 'models/vae'),\n",
|
||||
" 'VAE_APPROX': os.path.join(WORKSPACE, 'models/vae_approx')\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"TYPE = 'CHECKPOINTS' #@param ['CHECKPOINTS', 'CLIP', 'CLIP_VISION', 'CONFIGS', 'CONTROLNET', 'DIFFUSERS', 'EMBEDDINGS', 'GLIGEN', 'HYPERNETWORKS', 'LORAS', 'STYLE_MODEL', 'UNET', 'UPSCALE_MODELS', 'VAE', 'VAE_APPROX']\n",
|
||||
"DIRECT_URL = 'https://civitai.com/api/download/models/141627' #@param {type:\"string\"}\n",
|
||||
"SAVE_AS = 'Differentia_V1.safetensors' #@param {type: 'string'}\n",
|
||||
"#@markdown <font color=\"0497e0\">Direct link to the download. The example URL is Differentia (https://civitai.com/models/129232)</font>\n",
|
||||
"\n",
|
||||
"if TYPE and DIRECT_URL:\n",
|
||||
" target = os.path.join(types[TYPE], SAVE_AS)\n",
|
||||
" !wget -c $DIRECT_URL -O $target"
|
||||
],
|
||||
"metadata": {
|
||||
"cellView": "form",
|
||||
"id": "Bo0pf4So3tCK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"#@title <font size=\"5\" color=\"orange\">ComfyUI Cloudfare (Recommended)</font>\n",
|
||||
"#@markdown <font color=\"0497e0\">Running ComfyUI with Cloudfare is now the recommended method.</font>\n",
|
||||
"!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n",
|
||||
"!dpkg -i cloudflared-linux-amd64.deb\n",
|
||||
"\n",
|
||||
"import subprocess\n",
|
||||
"import threading\n",
|
||||
"import time\n",
|
||||
"import socket\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"def iframe_thread(port):\n",
|
||||
" while True:\n",
|
||||
" time.sleep(0.5)\n",
|
||||
" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', port))\n",
|
||||
" if result == 0:\n",
|
||||
" break\n",
|
||||
" sock.close()\n",
|
||||
" print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n",
|
||||
"\n",
|
||||
" p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n",
|
||||
" for line in p.stderr:\n",
|
||||
" l = line.decode()\n",
|
||||
" if \"trycloudflare.com \" in l:\n",
|
||||
" print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n",
|
||||
" #print(l, end='')\n",
|
||||
"\n",
|
||||
"threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
|
||||
"\n",
|
||||
"!python main.py --dont-print-server"
|
||||
],
|
||||
"metadata": {
|
||||
"cellView": "form",
|
||||
"id": "StSynv5tp2nL"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "jjjjjjjjjjjjj",
|
||||
"cellView": "form"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#@title <font size=\"5\" color=\"orange\">ComfyUI Localtunnel</font>\n",
|
||||
"#@markdown <font color=\"0497e0\">Run this cell to start ComfyUI. You'll see a link similar to `your url is: https://slow-yaks-jog-34-72-173-3.loca.lt` (example)</font>\n",
|
||||
"#@markdown <br>*If you have trouble with the red screen of death \"reminder\" not letting you generate, use the iFrame version below.*\n",
|
||||
"!npm install -g localtunnel\n",
|
||||
"\n",
|
||||
"import subprocess\n",
|
||||
"import threading\n",
|
||||
"import time\n",
|
||||
"import socket\n",
|
||||
"import urllib.request\n",
|
||||
"\n",
|
||||
"def iframe_thread(port):\n",
|
||||
" while True:\n",
|
||||
" time.sleep(0.5)\n",
|
||||
" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', port))\n",
|
||||
" if result == 0:\n",
|
||||
" break\n",
|
||||
" sock.close()\n",
|
||||
" print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\\n\")\n",
|
||||
"\n",
|
||||
" print(\"The password/enpoint ip for localtunnel is:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
|
||||
" p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
|
||||
" for line in p.stdout:\n",
|
||||
" print(line.decode(), end='')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
|
||||
"\n",
|
||||
"!python main.py --dont-print-server"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "gggggggggg"
|
||||
},
|
||||
"source": [
|
||||
"### Run ComfyUI with colab iframe ***(use only in case the previous ways don't work)***\n",
|
||||
"\n",
|
||||
"You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n",
|
||||
"\n",
|
||||
"If you want to open it in another window use the link.\n",
|
||||
"\n",
|
||||
"Note that some UI features like live image previews won't work because the colab iframe blocks websockets."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "hhhhhhhhhh",
|
||||
"cellView": "form"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#@title <font size=\"5\" color=\"orange\">ComfyUI iFrame</font>\n",
|
||||
"import threading\n",
|
||||
"import time\n",
|
||||
"import socket\n",
|
||||
"def iframe_thread(port):\n",
|
||||
" while True:\n",
|
||||
" time.sleep(0.5)\n",
|
||||
" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
|
||||
" result = sock.connect_ex(('127.0.0.1', port))\n",
|
||||
" if result == 0:\n",
|
||||
" break\n",
|
||||
" sock.close()\n",
|
||||
" from google.colab import output\n",
|
||||
" output.serve_kernel_port_as_iframe(port, height=1024)\n",
|
||||
" print(\"to open it in a window you can open this link here:\")\n",
|
||||
" output.serve_kernel_port_as_window(port)\n",
|
||||
"\n",
|
||||
"threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
|
||||
"\n",
|
||||
"!python main.py --dont-print-server"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"accelerator": "GPU",
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"private_outputs": true,
|
||||
"gpuType": "T4",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
custom_nodes/was-node-suite-comfyui/LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
custom_nodes/was-node-suite-comfyui/README.md (new file, 415 lines)
@@ -0,0 +1,415 @@
|
||||
# **WAS** Node Suite [](https://colab.research.google.com/github/WASasquatch/was-node-suite-comfyui/blob/main/ComfyUI_%2B_WAS_Node_Suite_and_ComfyUI_Manager.ipynb) [](https://hits.seeyoufarm.com) [](https://paypal.me/ThompsonJordan?country.x=US&locale.x=en_US)
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/1151589/228982359-4a6215cc-3ca9-4c24-8a7b-d229d7bce277.png">
|
||||
</p>
|
||||
|
||||
### A node suite for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) with many new nodes, such as image processing, text processing, and more.
|
||||
|
||||
#### [Share Workflows](https://github.com/WASasquatch/was-node-suite-comfyui/wiki/Workflow-Examples) to the workflows wiki. Preferably embedded PNGs with workflows, but JSON is OK too. [You can use this tool to add a workflow to a PNG file easily](https://colab.research.google.com/drive/1hQMjNUdhMQ3rw1Wcm3_umvmOMeS_K4s8?usp=sharing).
|
||||
#### Consider [donating to the project](https://paypal.me/ThompsonJordan?country.x=US&locale.x=en_US) to help its continued development.
|
||||
|
||||
# Important Updates
|
||||
|
||||
- **12/15/2023** WAS-NS is not under active development. I do not have the time and have other obligations. Feel free to fork and continue the project. I will approve appropriate and beneficial PRs.
|
||||
- **[Updated 10/8/2023]** BLIP is now a shipped module of WAS-NS and no longer requires the BLIP Repo
|
||||
- **[Updated 5/29/2023]** `ASCII` **is deprecated**. The new preferred method of text node output is `STRING`. This is a change from `ASCII` so that it is more clear what data is being passed.
|
||||
- The `was_suite_config.json` will automatically set `use_legacy_ascii_text` to `false`.
|
||||
- [Video Nodes](https://github.com/WASasquatch/was-node-suite-comfyui#video-nodes) - There are two new video nodes, `Write to Video` and `Create Video from Path`. These are experimental nodes.
|
||||
|
||||
# Current Nodes:
|
||||
|
||||
### There is documentation from [Salt AI](https://getsalt.ai/) available here: https://docs.getsalt.ai/md/was-node-suite-comfyui/
|
||||
|
||||
<details>
|
||||
<summary>$\Large\color{orange}{Expand\ Node\ List}$</summary>
|
||||
|
||||
<br/>
|
||||
|
||||
- BLIP Model Loader: Load a BLIP model to input into the BLIP Analyze node
|
||||
- BLIP Analyze Image: Get a text caption from a image, or interrogate the image with a question.
|
||||
- Model will download automatically from default URL, but you can point the download to another location/caption model in `was_suite_config`
|
||||
- Models will be stored in `ComfyUI/models/blip/checkpoints/`
|
||||
- SAM Model Loader: Load a SAM Segmentation model
|
||||
- SAM Parameters: Define your SAM parameters for segmentation of a image
|
||||
- SAM Parameters Combine: Combine SAM parameters
|
||||
- SAM Image Mask: SAM image masking
|
||||
- Image Bounds: Bounds a image
|
||||
- Inset Image Bounds: Inset a image bounds
|
||||
- Bounded Image Blend: Blend bounds image
|
||||
- Bounded Image Blend with Mask: Blend a bounds image by mask
|
||||
- Bounded Image Crop: Crop a bounds image
|
||||
- Bounded Image Crop with Mask: Crop a bounds image by mask
|
||||
- Bus Node: condense the 5 common connectors into one, keep your workspace tidy (Model, Clip, VAE, Positive Conditioning, Negative Conditioning)
|
||||
- Cache Node: Cache Latent, Tensor Batches (Image), and Conditioning to disk to use later.
- CLIPTextEncode (NSP): Parse noodle soups from the NSP pantry, or parse wildcards from a directory containing A1111-style wildcards.
- Wildcards are in the style of `__filename__`, which also includes subdirectories like `__appearance/haircolour__` (if your noodle_key is set to `__`)
|
||||
- You can set a custom wildcards path in `was_suite_config.json` file with key:
|
||||
- ` "wildcards_path": "E:\\python\\automatic\\webui3\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards"`
|
||||
- If no path is set the wildcards dir is located at the root of WAS Node Suite as `/wildcards`
|
||||
- CLIP Input Switch: Switch between two CLIP inputs based on a boolean switch.
|
||||
- CLIP Vision Input Switch: Switch between two CLIP Vision inputs based on a boolean switch.
|
||||
- Conditioning Input Switch: Switch between two conditioning inputs.
|
||||
- Constant Number
|
||||
- Control Net Model Input Switch: Switch between two Control Net Model inputs based on a boolean switch.
|
||||
- Create Grid Image: Create a image grid from images at a destination with customizable glob pattern. Optional border size and color.
|
||||
- Create Grid Image from Batch: Create a grid image from a batch tensor of images.
|
||||
- Create Morph Image: Create a GIF/APNG animation from two images, fading between them.
|
||||
- Create Morph Image by Path: Create a GIF/APNG animation from a path to a directory containing images, with optional pattern.
|
||||
- Create Video from Path: Create video from images from a specified path.
|
||||
- CLIPSeg Masking: Mask a image with CLIPSeg and return a raw mask
|
||||
- CLIPSeg Masking Batch: Create a batch image (from image inputs) and batch mask with CLIPSeg
|
||||
- Dictionary to Console: Print a dictionary input to the console
|
||||
- Image Analyze
|
||||
- Black White Levels
|
||||
- RGB Levels
|
||||
- Depends on `matplotlib`, will attempt to install on first run
|
||||
- Diffusers Hub Down-Loader: Download a diffusers model from the HuggingFace Hub and load it
|
||||
- Image SSAO (Ambient Occlusion): [Experimental Beta Node] Create Screen Space Ambient Occlusion with an image and MiDaS depth approximation (or provided depth map).
- Image SSDO (Direct Occlusion): [Experimental Beta Node] Create a Screen Space Direct Occlusion with an image input. Direct Occlusion presents you with direct lighting highlights, similar to how Ambient Occlusion finds the crevices and shadowy areas around objects.
- Image Aspect Ratio: Fetch image aspect ratio in float format, common format (e.g. 16:9), and whether the image is portrait, landscape, or square.
|
||||
- Image Batch: Create one batch out of multiple batched tensors.
|
||||
- Image Blank: Create a blank image in any color
|
||||
- Image Blend by Mask: Blend two images by a mask
|
||||
- Image Blend: Blend two images by opacity
|
||||
- Image Blending Mode: Blend two images by various blending modes
|
||||
- Image Bloom Filter: Apply a high-pass based bloom filter
|
||||
- Image Canny Filter: Apply a canny filter to a image
|
||||
- Image Chromatic Aberration: Apply chromatic aberration lens effect to a image like in sci-fi films, movie theaters, and video games
|
||||
- Image Color Palette
|
||||
- Generate a color palette based on the input image.
|
||||
- Depends on `scikit-learn`, will attempt to install on first run.
|
||||
- Supports color range of 8-256
|
||||
- Utilizes the font in `./res/` if available; otherwise it falls back to an internal better-than-nothing font.
|
||||
- Image Crop Face: Crop a face out of a image
|
||||
- **Limitations:**
|
||||
- Sometimes no faces are found in badly generated images, or faces at angles
|
||||
- Sometimes face crop is black, this is because the padding is too large and intersected with the image edge. Use a smaller padding size.
|
||||
- face_recognition mode sometimes finds random things as faces. It also requires a [CUDA] GPU.
|
||||
- Only detects one face. This is a design choice to make its use easy.
|
||||
- **Notes:**
|
||||
- Detection runs in succession. If nothing is found with the selected detection cascades, it will try the next available cascades file.
|
||||
- Image Crop Location: Crop an image to a specified location in top, left, right, and bottom positions relating to the pixel dimensions of the image in X and Y coordinates.
|
||||
- Image Crop Square Location: Crop a location by X/Y center, creating a square crop around that point.
|
||||
- Image Displacement Warp: Warp a image by a displacement map image by a given amplitude.
|
||||
- Image Dragan Photography Filter: Apply a Andrzej Dragan photography style to a image
|
||||
- Image Edge Detection Filter: Detect edges in a image
|
||||
- Image Film Grain: Apply film grain to a image
|
||||
- Image Filter Adjustments: Apply various image adjustments to a image
|
||||
- Image Flip: Flip a image horizontal, or vertical
|
||||
- Image Gradient Map: Apply a gradient map to a image
|
||||
- Image Generate Gradient: Generate a gradient map with desired stops and colors
|
||||
- Image High Pass Filter: Apply a high frequency pass to the image returning the details
|
||||
- Image History Loader: Load images from history based on the Load Image Batch node. Can define max history in config file. *(requires restart to show last sessions files at this time)*
|
||||
- Image Input Switch: Switch between two image inputs based on a boolean switch
|
||||
- Image Levels Adjustment: Adjust the levels of a image
|
||||
- Image Load: Load a *image* from any path on the system, or a url starting with `http`
|
||||
- Image Median Filter: Apply a median filter to a image, such as to smooth out details in surfaces
|
||||
- Image Mix RGB Channels: Mix together RGB channels into a single image
|
||||
- Image Monitor Effects Filter: Apply various monitor effects to a image
|
||||
- Digital Distortion
|
||||
- A digital breakup distortion effect
|
||||
- Signal Distortion
|
||||
- An analog signal distortion effect on vertical bands like a CRT monitor
|
||||
- TV Distortion
|
||||
- A TV scanline and bleed distortion effect
|
||||
- Image Nova Filter: A image that uses a sinus frequency to break apart a image into RGB frequencies
|
||||
- Image Perlin Noise: Generate perlin noise
|
||||
- Image Perlin Power Fractal: Generate a perlin power fractal
|
||||
- Image Paste Face Crop: Paste a face crop back onto an image at its original location and size
- Features a better blending function than GFPGAN/CodeFormer so there shouldn't be visible seams, and coupled with Diffusion Result, looks better than GFPGAN/CodeFormer.
- Image Paste Crop: Paste a crop (such as from Image Crop Location) at its original location and size utilizing the `crop_data` node input. This uses a different blending algorithm than Image Paste Face Crop, which may be desired in certain instances.
|
||||
- Image Power Noise: Generate power-law noise
|
||||
- frequency: The frequency parameter controls the distribution of the noise across different frequencies. In the context of Fourier analysis, higher frequencies represent fine details or high-frequency components, while lower frequencies represent coarse details or low-frequency components. Adjusting the frequency parameter can result in different textures and levels of detail in the generated noise. The specific range and meaning of the frequency parameter may vary depending on the noise type.
|
||||
- attenuation: The attenuation parameter determines the strength or intensity of the noise. It controls how much the noise values deviate from the mean or central value. Higher values of attenuation lead to more significant variations and a stronger presence of noise, while lower values result in a smoother and less noticeable noise. The specific range and interpretation of the attenuation parameter may vary depending on the noise type.
|
||||
- noise_type: The type of Power-Law noise to generate (white, grey, pink, green, blue)
- Image Paste Crop by Location: Paste a crop to a custom location. This uses the same blending algorithm as Image Paste Crop.
|
||||
- Image Pixelate: Turn a image into pixel art! Define the max number of colors, the pixelation mode, the random state, and max iterations, and max those sprites shine.
|
||||
- Image Remove Background (Alpha): Remove the background from a image by threshold and tolerance.
|
||||
- Image Remove Color: Remove a color from a image and replace it with another
|
||||
- Image Resize
|
||||
- Image Rotate: Rotate an image
|
||||
- Image Rotate Hue: Rotate the hue of a image. A hue_shift of `0.0` would represent no change, and `1.0` would represent a full circle of the hue, and also exhibit no change.
|
||||
- Image Save: A save image node with format support and path support.
|
||||
- `show_history` will show previously saved images with the WAS Save Image node. ComfyUI unfortunately resizes all displayed images to the same size, however, so images of differing sizes will be forced into a common display size.
|
||||
- Doesn't display images saved outside `/ComfyUI/output/`
|
||||
- You can save as `webp` if you have webp available to your system. On Windows you can get that support with this [precompiled library](https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-1.3.0-windows-x64.zip) from the [webp project](https://developers.google.com/speed/webp/download). On Linux you can run `apt-get install webp`.
|
||||
- Image Seamless Texture: Create a seamless texture out of a image with optional tiling
|
||||
- Image Select Channel: Select a single channel of an RGB image
|
||||
- Image Select Color: Return the select image only on a black canvas
|
||||
- Image Shadows and Highlights: Adjust the shadows and highlights of an image
|
||||
- Image Size to Number: Get the `width` and `height` of an input image to use with **Number** nodes.
|
||||
- Image Stitch: Stitch images together on different sides with optional feathering blending between them.
|
||||
- Image Style Filter: Style an image with Pilgram Instagram-like filters
|
||||
- Depends on `pilgram` module
|
||||
- Image Threshold: Return the desired threshold range of a image
|
||||
- Image Tile: Split a image up into a image batch of tiles. Can be used with Tensor Batch to Image to select a individual tile from the batch.
|
||||
- Image Transpose
|
||||
- Image fDOF Filter: Apply a fake depth of field effect to an image
|
||||
- Image to Latent Mask: Convert a image into a latent mask
|
||||
- Image to Noise: Convert a image into noise, useful for init blending or init input to theme a diffusion.
|
||||
- Images to RGB: Convert a tensor image batch to RGB if they are RGBA or some other mode.
|
||||
- Image to Seed: Convert a image to a reproducible seed
|
||||
- Image Voronoi Noise Filter
|
||||
- A custom implementation of the worley voronoi noise diagram
|
||||
- Input Switch (Disable until `*` wildcard fix)
|
||||
- KSampler (WAS): A sampler that accepts a seed as a node inputs
|
||||
- KSampler Cycle: A KSampler able to do HR pass loops, you can specify an upscale factor, and how many steps to achieve that factor. Accepts a upscale_model, as well as a 1x processor model. A secondary diffusion model can also be used.
|
||||
- Load Cache: Load cached Latent, Tensor Batch (image), and Conditioning files.
|
||||
- Load Text File
|
||||
- Now supports outputting a dictionary named after the file, or custom input.
|
||||
- The dictionary contains a list of all lines in the file.
|
||||
- Load Batch Images
|
||||
- Increment images in a folder, or fetch a single image out of a batch.
|
||||
- Will reset its place if the path or pattern is changed.
|
||||
- pattern is a glob that allows you to do things like `**/*` to get all files in the directory and subdirectory
|
||||
or things like `*.jpg` to select only JPEG images in the directory specified.
|
||||
- Mask to Image: Convert `MASK` to `IMAGE`
|
||||
- Mask Batch to Mask: Return a single mask from a batch of masks
|
||||
- Mask Invert: Invert a mask.
|
||||
- Mask Add: Add masks together.
|
||||
- Mask Subtract: Subtract from a mask by another.
|
||||
- Mask Dominant Region: Return the dominant region in a mask (the largest area)
|
||||
- Mask Minority Region: Return the smallest region in a mask (the smallest area)
|
||||
- Mask Crop Dominant Region: Crop mask to the dominant region with optional padding in pixels
|
||||
- Mask Crop Minority Region: Crop mask to the minority region with optional padding in pixels
|
||||
- Mask Crop Region: Crop to dominant or minority region and return `crop_data` for pasting back. Additionally outputs region location and size for other nodes like Crop Image Location.
|
||||
- Mask Arbitrary Region: Return a region that most closely matches the size input (size is not a direct representation of pixels, but approximate)
|
||||
- Mask Smooth Region: Smooth the boundaries of a mask
|
||||
- Mask Erode Region: Erode the boundaries of a mask
|
||||
- Mask Dilate Region: Dilate the boundaries of a mask
|
||||
- Mask Fill Region: Fill holes within the masks regions
|
||||
- Mask Ceiling Region": Return only white pixels within a offset range.
|
||||
- Mask Floor Region: Return the lower most pixel values as white (255)
|
||||
- Mask Threshold Region: Apply a thresholded image between a black value and white value
|
||||
- Mask Gaussian Region: Apply a Gaussian blur to the mask
|
||||
- Mask Rect Area: Create a rectangular mask defined by percentages.
|
||||
- Mask Rect Area (Advanced): Create a rectangular mask defined by pixels and image size.
|
||||
- Masks Combine Masks: Combine 2 or more masks into one mask.
|
||||
- Masks Combine Batch: Combine batched masks into one mask.
|
||||
- Model Input Switch: Switch between two model inputs based on a boolean switch
|
||||
- ComfyUI Loaders: A set of ComfyUI loaders that also output a string that contains the name of the model being loaded.
|
||||
- Latent Noise Injection: Inject latent noise into a latent image
|
||||
- Latent Size to Number: Latent sizes in tensor width/height
|
||||
- Latent Upscale by Factor: Upscale a latent image by a factor
|
||||
- Latent Input Switch: Switch between two latent inputs based on a boolean switch
|
||||
- Logic Boolean: A simple `1` or `0` output to use with logic
|
||||
- Logic Boolean Primitive: True/False boolean input, to use with native boolean nodes
|
||||
- Logic AND: Given 2 booleans, performs "AND"
|
||||
- Logic OR: Given 2 booleans, performs "OR"
|
||||
- Logic XOR: Given 2 booleans, performs "!="
|
||||
- Logic NOT: Given 1 boolean, returns the opposite
|
||||
- Lora Input Switch: Switch between two LORAs based on a boolean switch
|
||||
- MiDaS Model Loader: Load a MiDaS model as an optional input for MiDaS Depth Approximation
|
||||
- MiDaS Depth Approximation: Produce a depth approximation of a single image input
|
||||
- MiDaS Mask Image: Mask a input image using MiDaS with a desired color
|
||||
- Number Operation
|
||||
- Number to Seed
|
||||
- Number to Float
|
||||
- Number Input Switch: Switch between two number inputs based on a boolean switch
|
||||
- Number Input Condition: Compare between two inputs or against the A input
|
||||
- Number to Int
|
||||
- Number to String
|
||||
- Number to Text
|
||||
- Boolean to Text
|
||||
- Perlin Power Fractal Latent: Create a power fractal based latent image. Doesn't work with all samplers (unless you add noise).
|
||||
- Random Number
|
||||
- Random integer between min and max (inclusive), uniformly distributed random number
|
||||
- Random float between min and max (inclusive), uniformly distributed random number
|
||||
- Random number from 0 to 1 inclusive, this will be a 0 or 1 boolean if you use the 'int' output
|
||||
- Random shuffled list of integers between min and max inclusive. E.g. if min=0 and max=3, a possible outcome would be the string '3,1,2,0'
|
||||
- Save Text File: Save a text string to a file
|
||||
- Samples Passthrough (Stat System): Logs RAM, VRAM, and Disk usage to the console.
|
||||
- Seed: Return a seed
|
||||
- Tensor Batch to Image: Select a single image out of a latent batch for post processing with filters
|
||||
- Text Add Tokens: Add custom tokens to parse in filenames or other text.
|
||||
- Text Add Token by Input: Add custom token by inputs representing single **single line** name and value of the token
|
||||
- Text Compare: Compare two strings. Returns a boolean if they are the same, a score of similarity, and the similarity or difference text.
|
||||
- Text Concatenate: Merge two strings
|
||||
- Text Dictionary Update: Merge two dictionaries
|
||||
- Text Dictionary Get: Get a value from a dictionary (as a string)
|
||||
- Text Dictionary Convert: Convert text to dictionary object
|
||||
- Text Dictionary New: Create a new dictionary
|
||||
- Text Dictionary Keys: Returns the keys, as a list from a dictionary object
|
||||
- Text Dictionary To Text: Returns the dictionary as text
|
||||
- Text File History: Show previously opened text files *(requires restart to show last sessions files at this time)*
|
||||
- Text Find: Find a substring or pattern within another string. Returns boolean
|
||||
- Text Find and Replace: Find and replace a substring in a string
|
||||
- Text Find and Replace by Dictionary: Replace substrings in a ASCII text input with a dictionary.
|
||||
- The dictionary keys are used as the key to replace, and the list of lines it contains chosen at random based on the seed.
|
||||
- Text Input Switch: Switch between two text inputs
|
||||
- Text List: Create a list of text strings
|
||||
- Text Load Line From File: Load lines from a file sequentially each *batch prompt* run, or select a line index.
|
||||
- Text Concatenate: Merge lists of strings
|
||||
- Text Contains: Checks if substring is in another string (case insensitive optional)
|
||||
- Text Multiline: Write a multiline text string
|
||||
- Text Parse A1111 Embeddings: Convert embeddings filenames in your prompts to `embedding:[filename]` format based on your `/ComfyUI/models/embeddings/` files.
|
||||
- Text Parse Noodle Soup Prompts: Parse NSP in a text input
|
||||
- Text Parse Tokens: Parse custom tokens in text.
|
||||
- Text Random Line: Select a random line from a text input string
|
||||
- Text Random Prompt: Feeling lucky? Get a random prompt based on a search seed, such as "superhero"
|
||||
- Text String: Write a single line text string value
|
||||
- Text String Truncate: Truncate a string from the beginning or end by characters or words.
|
||||
- Text to Conditioning: Convert a text string to conditioning.
|
||||
- True Random.org Number Generator: Generate a truly random number online from atmospheric noise with [Random.org](https://random.org/)
|
||||
- [Get your API key from your account page](https://accounts.random.org/)
|
||||
- Upscale Model Input Switch: Switch between two Upscale Models inputs based on a boolean switch.
|
||||
- Write to Morph GIF: Write a new frame to an existing GIF (or create new one) with interpolation between frames.
|
||||
- Write to Video: Write a frame as you generate to a video (Best used with FFV1 for lossless images)
|
||||
- VAE Input Switch: Switch between two VAE inputs based on boolean input
|
||||
</details>
|
||||
|
||||
|
||||
<br>
|
||||
|
||||
### Extra Nodes
|
||||
|
||||
- CLIPTextEncode (BlenderNeko Advanced + NSP): Only available if you have BlenderNeko's [Advanced CLIP Text Encode](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb). Allows for NSP and Wildcard use with their advanced CLIPTextEncode.
|
||||
|
||||
|
||||
### Notes:
|
||||
|
||||
- **CLIPTextEncode (NSP)** and **CLIPTextEncode (BlenderNeko Advanced + NSP)**: Accept dynamic prompts in `<option1|option2|option3>` format. This will respect the nodes input seed to yield reproducible results like NSP and Wildcards.
|
||||
- **CLIPTextEncode (NSP)** and **CLIPTextEncode (BlenderNeko Advanced + NSP)**: Assign variables with `$|prompt words|$` format. You can then print this word again within the prompt with the number corresponding the order you created them. So the first prompt var would be printed with `$1` and the second with `$2` and so on.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Video Nodes
|
||||
|
||||
### Codecs
|
||||
You can use codecs that are available to your ffmpeg binaries by adding their fourcc ID (in one string), and appropriate container extension to the `was_suite_config.json`
|
||||
|
||||
Example [H264 Codecs](https://github.com/cisco/openh264/releases/tag/v1.8.0) (Defaults)
|
||||
```
|
||||
"ffmpeg_extra_codecs": {
|
||||
"avc1": ".mp4",
|
||||
"h264": ".mkv"
|
||||
}
|
||||
```
|
||||
|
||||
### Notes
|
||||
- For now I am only supporting **Windows** installations for video nodes.
|
||||
- I do not have access to Mac or a stand-alone linux distro. If you get them working and want to PR a patch/directions, feel free.
|
||||
- Video nodes require [FFMPEG](https://ffmpeg.org/download.html). You should download the proper FFMPEG binaries for you system and set the FFMPEG path in the config file.
|
||||
- Additionally, if you want to use the H264 codec you need to [download OpenH264 1.8.0](https://github.com/cisco/openh264/releases/tag/v1.8.0) and place it in the root of ComfyUI (example: `C:\ComfyUI_windows_portable`).
|
||||
- FFV1 will complain about an invalid container. You can ignore this; the resulting MKV file is readable. I have not figured out what this issue is about. Documentation tells me to use MKV, but it's telling me it's unsupported.
|
||||
- If you know how to resolve this, I'd love a PR
|
||||
- The `Write to Video` node should use a lossless video codec; otherwise, as it copies frames and reapplies compression run after run, it will progressively degrade the earliest frames.
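As a point of reference (this is not part of WAS-NS itself), combining already-saved frames into a lossless FFV1 MKV with a system ffmpeg avoids the recompression problem described above; the frame filename pattern below is hypothetical:

```
# Hypothetical example: encode saved PNG frames losslessly with FFV1 into MKV
ffmpeg -framerate 24 -i output/frame_%05d.png -c:v ffv1 -level 3 output/result.mkv
```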
|
||||
|
||||
---
|
||||
|
||||
# Text Tokens
|
||||
Text tokens can be used in the Save Text File and Save Image nodes. You can also add your own custom tokens with the Text Add Tokens node.
|
||||
|
||||
The token name can be anything excluding the `:` character to define your token. It can also be simple Regular Expressions.
|
||||
|
||||
## Built-in Tokens
|
||||
- [time]
|
||||
- The current system microtime
|
||||
- [time(`format_code`)]
|
||||
- The current system time in human readable format. Utilizing [datetime](https://docs.python.org/3/library/datetime.html) formatting
|
||||
- Example: `[hostname]_[time]__[time(%Y-%m-%d__%I-%M%p)]` would output: **SKYNET-MASTER_1680897261__2023-04-07__07-54PM**
|
||||
- [hostname]
|
||||
- The hostname of the system executing ComfyUI
|
||||
- [cuda_device]
|
||||
- The cuda device from `comfy.model_management.get_cuda_device()`
|
||||
- [cuda_name]
|
||||
- The cuda name from `comfy.model_management.get_cuda_device_name()`

<br>

<details>
<summary>$\color{orange}{Expand\ Date\ Code\ List}$</summary>

<br>

| Directive | Meaning | Example | Notes |
| --- | --- | --- | --- |
| %a | Weekday as locale’s abbreviated name. | Sun, Mon, …, Sat (en_US); So, Mo, …, Sa (de_DE) | (1) |
| %A | Weekday as locale’s full name. | Sunday, Monday, …, Saturday (en_US); Sonntag, Montag, …, Samstag (de_DE) | (1) |
| %w | Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. | 0, 1, …, 6 | |
| %d | Day of the month as a zero-padded decimal number. | 01, 02, …, 31 | (9) |
| %b | Month as locale’s abbreviated name. | Jan, Feb, …, Dec (en_US); Jan, Feb, …, Dez (de_DE) | (1) |
| %B | Month as locale’s full name. | January, February, …, December (en_US); Januar, Februar, …, Dezember (de_DE) | (1) |
| %m | Month as a zero-padded decimal number. | 01, 02, …, 12 | (9) |
| %y | Year without century as a zero-padded decimal number. | 00, 01, …, 99 | (9) |
| %Y | Year with century as a decimal number. | 0001, 0002, …, 2013, 2014, …, 9998, 9999 | (2) |
| %H | Hour (24-hour clock) as a zero-padded decimal number. | 00, 01, …, 23 | (9) |
| %I | Hour (12-hour clock) as a zero-padded decimal number. | 01, 02, …, 12 | (9) |
| %p | Locale’s equivalent of either AM or PM. | AM, PM (en_US); am, pm (de_DE) | (1), (3) |
| %M | Minute as a zero-padded decimal number. | 00, 01, …, 59 | (9) |
| %S | Second as a zero-padded decimal number. | 00, 01, …, 59 | (4), (9) |
| %f | Microsecond as a decimal number, zero-padded to 6 digits. | 000000, 000001, …, 999999 | (5) |
| %z | UTC offset in the form ±HHMM[SS[.ffffff]] (empty string if the object is naive). | (empty), +0000, -0400, +1030, +063415, -030712.345216 | (6) |
| %Z | Time zone name (empty string if the object is naive). | (empty), UTC, GMT | (6) |
| %j | Day of the year as a zero-padded decimal number. | 001, 002, …, 366 | (9) |
| %U | Week number of the year (Sunday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0. | 00, 01, …, 53 | (7), (9) |
| %W | Week number of the year (Monday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the first Monday are considered to be in week 0. | 00, 01, …, 53 | (7), (9) |
| %c | Locale’s appropriate date and time representation. | Tue Aug 16 21:30:00 1988 (en_US); Di 16 Aug 21:30:00 1988 (de_DE) | (1) |
| %x | Locale’s appropriate date representation. | 08/16/88 (None); 08/16/1988 (en_US); 16.08.1988 (de_DE) | (1) |
| %X | Locale’s appropriate time representation. | 21:30:00 (en_US); 21:30:00 (de_DE) | (1) |
| %% | A literal '%' character. | % | |

</details>

<br>

---

# Other Features

### Import AUTOMATIC1111 WebUI Styles

When using the latest builds of WAS Node Suite, a `was_suite_config.json` file will be generated (if it doesn't exist). In this file you can set up an A1111 styles import.

- Run ComfyUI to generate the new `/custom_nodes/was-node-suite-comfyui/was_suite_config.json` file.
- Open the `was_suite_config.json` file with a text editor.
- Change the `webui_styles` value from `None` to the path of your A1111 styles file, **styles.csv**. Be sure to use double backslashes for Windows paths.
  - Example: `C:\\python\\stable-diffusion-webui\\styles.csv`
- Restart ComfyUI.
- Select a style with the `Prompt Styles Node`.
  - The first ASCII output is your positive prompt, and the second ASCII output is your negative prompt.

You can set `webui_styles_persistent_update` to `true` to update the WAS Node Suite styles from WebUI on every start of ComfyUI.
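
For reference, after editing, the relevant lines of `was_suite_config.json` might look something like the following (the two keys are the ones described above; the path is just the sample path from the list, and any surrounding keys are left out):
```
"webui_styles": "C:\\python\\stable-diffusion-webui\\styles.csv",
"webui_styles_persistent_update": true
```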

# Recommended Installation:

If you're running on Linux, or a non-admin account on Windows, you'll want to ensure `/ComfyUI/custom_nodes`, `was-node-suite-comfyui`, and `WAS_Node_Suite.py` have write permissions.

There is now an **install.bat** you can run to install to the portable build if it is detected. Otherwise it will default to system Python and assume you followed ComfyUI's manual installation steps.

- Navigate to your `/ComfyUI/custom_nodes/` folder
- Run `git clone https://github.com/WASasquatch/was-node-suite-comfyui/`
- Navigate to your `was-node-suite-comfyui` folder
  - Portable/venv:
    - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
  - With system python:
    - Run `pip install -r requirements.txt`
- Start ComfyUI
  - WAS Suite should uninstall legacy nodes automatically for you.
  - Tools will be located in the WAS Suite menu.

## Alternate [Legacy] Installation:

If you're running on Linux, or a non-admin account on Windows, you'll want to ensure `/ComfyUI/custom_nodes` and `WAS_Node_Suite.py` have write permissions.

- Download `WAS_Node_Suite.py`
- Move the file to your `/ComfyUI/custom_nodes/` folder
- WAS Node Suite will attempt to install dependencies on its own, but you may need to do so manually. The required dependencies are listed in the `requirements.txt` in this repo. See the installation steps above.
- If this process fails, attempt the following:
  - Navigate to your `was-node-suite-comfyui` folder
  - Portable/venv:
    - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
  - With system python:
    - Run `pip install -r requirements.txt`
- Start, or restart, ComfyUI
  - WAS Suite should uninstall legacy nodes automatically for you.
  - Tools will be located in the WAS Suite menu.

This method will not install the resources required for the Image Crop Face node, and you'll have to download the [./res/](https://github.com/WASasquatch/was-node-suite-comfyui/tree/main/res) folder yourself.

## Installing on Colab

Create a new cell, add the following code, and run the cell. You may need to edit the path to your `custom_nodes` folder. You can also use the [colab hosted here](https://colab.research.google.com/github/WASasquatch/comfyui-colab-was-node-suite/blob/main/ComfyUI_%2B_WAS_Node_Suite.ipynb).

- `!git clone https://github.com/WASasquatch/was-node-suite-comfyui /content/ComfyUI/custom_nodes/was-node-suite-comfyui`
- `!pip install -r /content/ComfyUI/custom_nodes/was-node-suite-comfyui/requirements.txt`
- Restart the Colab runtime (don't disconnect)
- Tools will be located in the WAS Suite menu.
14828
custom_nodes/was-node-suite-comfyui/WAS_Node_Suite.py
Normal file
3
custom_nodes/was-node-suite-comfyui/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .WAS_Node_Suite import NODE_CLASS_MAPPINGS
|
||||
|
||||
__all__ = ['NODE_CLASS_MAPPINGS']
|
||||
16
custom_nodes/was-node-suite-comfyui/install.bat
Normal file
@@ -0,0 +1,16 @@
|
||||
@echo off
|
||||
|
||||
set "requirements_txt=%~dp0\requirements.txt"
|
||||
set "python_exec=..\..\..\python_embeded\python.exe"
|
||||
|
||||
echo Installing WAS-NS ...
|
||||
|
||||
if exist "%python_exec%" (
|
||||
echo Installing with ComfyUI Portable
|
||||
"%python_exec%" -s -m pip install -r "%requirements_txt%"
|
||||
) else (
|
||||
echo Installing with system Python
|
||||
pip install -r "%requirements_txt%"
|
||||
)
|
||||
|
||||
pause
|
||||
20
custom_nodes/was-node-suite-comfyui/install_alt.bat
Normal file
@@ -0,0 +1,20 @@
|
||||
@echo off
|
||||
|
||||
set "requirements_txt=%~dp0\requirements.txt"
|
||||
set "python_exec=..\..\..\python_embeded\python.exe"
|
||||
|
||||
echo Installing WAS-NS ...
|
||||
|
||||
if exist "%python_exec%" (
|
||||
echo Installing with ComfyUI Portable
|
||||
for /f "delims=" %%i in (%requirements_txt%) do (
|
||||
%python_exec% -s -m pip install "%%i"
|
||||
)
|
||||
) else (
|
||||
echo Installing with system Python
|
||||
for /f "delims=" %%i in (%requirements_txt%) do (
|
||||
pip install "%%i"
|
||||
)
|
||||
)
|
||||
|
||||
pause
|
||||
@@ -0,0 +1 @@
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"architectures": [
|
||||
"BertModel"
|
||||
],
|
||||
"attention_probs_dropout_prob": 0.1,
|
||||
"hidden_act": "gelu",
|
||||
"hidden_dropout_prob": 0.1,
|
||||
"hidden_size": 768,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 3072,
|
||||
"layer_norm_eps": 1e-12,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "bert",
|
||||
"num_attention_heads": 12,
|
||||
"num_hidden_layers": 12,
|
||||
"pad_token_id": 0,
|
||||
"type_vocab_size": 2,
|
||||
"vocab_size": 30522,
|
||||
"encoder_width": 768,
|
||||
"add_cross_attention": true
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
image_root: '/export/share/datasets/vision/coco/images/'
|
||||
ann_root: 'annotation'
|
||||
coco_gt_root: 'annotation/coco_gt'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
|
||||
|
||||
# size of vit model; base or large
|
||||
vit: 'base'
|
||||
vit_grad_ckpt: False
|
||||
vit_ckpt_layer: 0
|
||||
batch_size: 32
|
||||
init_lr: 1e-5
|
||||
|
||||
# vit: 'large'
|
||||
# vit_grad_ckpt: True
|
||||
# vit_ckpt_layer: 5
|
||||
# batch_size: 16
|
||||
# init_lr: 2e-6
|
||||
|
||||
image_size: 384
|
||||
|
||||
# generation configs
|
||||
max_length: 20
|
||||
min_length: 5
|
||||
num_beams: 3
|
||||
prompt: 'a picture of '
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
min_lr: 0
|
||||
max_epoch: 5
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"architectures": [
|
||||
"BertModel"
|
||||
],
|
||||
"attention_probs_dropout_prob": 0.1,
|
||||
"hidden_act": "gelu",
|
||||
"hidden_dropout_prob": 0.1,
|
||||
"hidden_size": 768,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 3072,
|
||||
"layer_norm_eps": 1e-12,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "bert",
|
||||
"num_attention_heads": 12,
|
||||
"num_hidden_layers": 12,
|
||||
"pad_token_id": 0,
|
||||
"type_vocab_size": 2,
|
||||
"vocab_size": 30524,
|
||||
"encoder_width": 768,
|
||||
"add_cross_attention": true
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
image_root: '/export/share/datasets/vision/NLVR2/'
|
||||
ann_root: 'annotation'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth'
|
||||
|
||||
#size of vit model; base or large
|
||||
vit: 'base'
|
||||
batch_size_train: 16
|
||||
batch_size_test: 64
|
||||
vit_grad_ckpt: False
|
||||
vit_ckpt_layer: 0
|
||||
max_epoch: 15
|
||||
|
||||
image_size: 384
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
init_lr: 3e-5
|
||||
min_lr: 0
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
image_root: '/export/share/datasets/vision/nocaps/'
|
||||
ann_root: 'annotation'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
|
||||
|
||||
vit: 'base'
|
||||
batch_size: 32
|
||||
|
||||
image_size: 384
|
||||
|
||||
max_length: 20
|
||||
min_length: 5
|
||||
num_beams: 3
|
||||
prompt: 'a picture of '
|
||||
@@ -0,0 +1,27 @@
|
||||
train_file: ['/export/share/junnan-li/VL_pretrain/annotation/coco_karpathy_train.json',
|
||||
'/export/share/junnan-li/VL_pretrain/annotation/vg_caption.json',
|
||||
]
|
||||
laion_path: ''
|
||||
|
||||
# size of vit model; base or large
|
||||
vit: 'base'
|
||||
vit_grad_ckpt: False
|
||||
vit_ckpt_layer: 0
|
||||
|
||||
image_size: 224
|
||||
batch_size: 75
|
||||
|
||||
queue_size: 57600
|
||||
alpha: 0.4
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
init_lr: 3e-4
|
||||
min_lr: 1e-6
|
||||
warmup_lr: 1e-6
|
||||
lr_decay_rate: 0.9
|
||||
max_epoch: 20
|
||||
warmup_steps: 3000
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
image_root: '/export/share/datasets/vision/coco/images/'
|
||||
ann_root: 'annotation'
|
||||
dataset: 'coco'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
|
||||
|
||||
# size of vit model; base or large
|
||||
|
||||
vit: 'base'
|
||||
batch_size_train: 32
|
||||
batch_size_test: 64
|
||||
vit_grad_ckpt: True
|
||||
vit_ckpt_layer: 4
|
||||
init_lr: 1e-5
|
||||
|
||||
# vit: 'large'
|
||||
# batch_size_train: 16
|
||||
# batch_size_test: 32
|
||||
# vit_grad_ckpt: True
|
||||
# vit_ckpt_layer: 12
|
||||
# init_lr: 5e-6
|
||||
|
||||
image_size: 384
|
||||
queue_size: 57600
|
||||
alpha: 0.4
|
||||
k_test: 256
|
||||
negative_all_rank: True
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
min_lr: 0
|
||||
max_epoch: 6
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
image_root: '/export/share/datasets/vision/flickr30k/'
|
||||
ann_root: 'annotation'
|
||||
dataset: 'flickr'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_flickr.pth'
|
||||
|
||||
# size of vit model; base or large
|
||||
|
||||
vit: 'base'
|
||||
batch_size_train: 32
|
||||
batch_size_test: 64
|
||||
vit_grad_ckpt: True
|
||||
vit_ckpt_layer: 4
|
||||
init_lr: 1e-5
|
||||
|
||||
# vit: 'large'
|
||||
# batch_size_train: 16
|
||||
# batch_size_test: 32
|
||||
# vit_grad_ckpt: True
|
||||
# vit_ckpt_layer: 10
|
||||
# init_lr: 5e-6
|
||||
|
||||
image_size: 384
|
||||
queue_size: 57600
|
||||
alpha: 0.4
|
||||
k_test: 128
|
||||
negative_all_rank: False
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
min_lr: 0
|
||||
max_epoch: 6
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
video_root: '/export/share/dongxuli/data/msrvtt_retrieval/videos'
|
||||
ann_root: 'annotation'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
|
||||
|
||||
# size of vit model; base or large
|
||||
vit: 'base'
|
||||
batch_size: 64
|
||||
k_test: 128
|
||||
image_size: 384
|
||||
num_frm_test: 8
|
||||
@@ -0,0 +1,25 @@
|
||||
vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/' #followed by train2014/
|
||||
vg_root: '/export/share/datasets/vision/visual-genome/' #followed by image/
|
||||
train_files: ['vqa_train','vqa_val','vg_qa']
|
||||
ann_root: 'annotation'
|
||||
|
||||
# set pretrained as a file path or an url
|
||||
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
|
||||
|
||||
# size of vit model; base or large
|
||||
vit: 'base'
|
||||
batch_size_train: 16
|
||||
batch_size_test: 32
|
||||
vit_grad_ckpt: False
|
||||
vit_ckpt_layer: 0
|
||||
init_lr: 2e-5
|
||||
|
||||
image_size: 480
|
||||
|
||||
k_test: 128
|
||||
inference: 'rank'
|
||||
|
||||
# optimizer
|
||||
weight_decay: 0.05
|
||||
min_lr: 0
|
||||
max_epoch: 10
|
||||
955
custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_med.py
Normal file
@@ -0,0 +1,955 @@
|
||||
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
* Based on huggingface code base
|
||||
* https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
|
||||
'''
|
||||
|
||||
import math
|
||||
import os
|
||||
import warnings
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import torch
|
||||
from torch import Tensor, device, dtype, nn
|
||||
import torch.utils.checkpoint
|
||||
from torch import nn
|
||||
from torch.nn import CrossEntropyLoss
|
||||
import torch.nn.functional as F
|
||||
|
||||
from transformers.activations import ACT2FN
|
||||
from transformers.file_utils import (
|
||||
ModelOutput,
|
||||
)
|
||||
from transformers.modeling_outputs import (
|
||||
BaseModelOutputWithPastAndCrossAttentions,
|
||||
BaseModelOutputWithPoolingAndCrossAttentions,
|
||||
CausalLMOutputWithCrossAttentions,
|
||||
MaskedLMOutput,
|
||||
MultipleChoiceModelOutput,
|
||||
NextSentencePredictorOutput,
|
||||
QuestionAnsweringModelOutput,
|
||||
SequenceClassifierOutput,
|
||||
TokenClassifierOutput,
|
||||
)
|
||||
from transformers.modeling_utils import (
|
||||
PreTrainedModel,
|
||||
apply_chunking_to_forward,
|
||||
find_pruneable_heads_and_indices,
|
||||
prune_linear_layer,
|
||||
)
|
||||
from transformers.utils import logging
|
||||
from transformers.models.bert.configuration_bert import BertConfig
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
class BertEmbeddings(nn.Module):
|
||||
"""Construct the embeddings from word and position embeddings."""
|
||||
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
||||
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
|
||||
|
||||
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
||||
# any TensorFlow checkpoint file
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
|
||||
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
||||
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
|
||||
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
||||
|
||||
self.config = config
|
||||
|
||||
def forward(
|
||||
self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
|
||||
):
|
||||
if input_ids is not None:
|
||||
input_shape = input_ids.size()
|
||||
else:
|
||||
input_shape = inputs_embeds.size()[:-1]
|
||||
|
||||
seq_length = input_shape[1]
|
||||
|
||||
if position_ids is None:
|
||||
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
|
||||
|
||||
if inputs_embeds is None:
|
||||
inputs_embeds = self.word_embeddings(input_ids)
|
||||
|
||||
embeddings = inputs_embeds
|
||||
|
||||
if self.position_embedding_type == "absolute":
|
||||
position_embeddings = self.position_embeddings(position_ids)
|
||||
embeddings += position_embeddings
|
||||
embeddings = self.LayerNorm(embeddings)
|
||||
embeddings = self.dropout(embeddings)
|
||||
return embeddings
|
||||
|
||||
|
||||
class BertSelfAttention(nn.Module):
|
||||
def __init__(self, config, is_cross_attention):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
||||
raise ValueError(
|
||||
"The hidden size (%d) is not a multiple of the number of attention "
|
||||
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
|
||||
)
|
||||
|
||||
self.num_attention_heads = config.num_attention_heads
|
||||
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
||||
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
||||
|
||||
self.query = nn.Linear(config.hidden_size, self.all_head_size)
|
||||
if is_cross_attention:
|
||||
self.key = nn.Linear(config.encoder_width, self.all_head_size)
|
||||
self.value = nn.Linear(config.encoder_width, self.all_head_size)
|
||||
else:
|
||||
self.key = nn.Linear(config.hidden_size, self.all_head_size)
|
||||
self.value = nn.Linear(config.hidden_size, self.all_head_size)
|
||||
|
||||
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
||||
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
||||
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
||||
self.max_position_embeddings = config.max_position_embeddings
|
||||
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
|
||||
self.save_attention = False
|
||||
|
||||
def save_attn_gradients(self, attn_gradients):
|
||||
self.attn_gradients = attn_gradients
|
||||
|
||||
def get_attn_gradients(self):
|
||||
return self.attn_gradients
|
||||
|
||||
def save_attention_map(self, attention_map):
|
||||
self.attention_map = attention_map
|
||||
|
||||
def get_attention_map(self):
|
||||
return self.attention_map
|
||||
|
||||
def transpose_for_scores(self, x):
|
||||
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
||||
x = x.view(*new_x_shape)
|
||||
return x.permute(0, 2, 1, 3)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_value=None,
|
||||
output_attentions=False,
|
||||
):
|
||||
mixed_query_layer = self.query(hidden_states)
|
||||
|
||||
# If this is instantiated as a cross-attention module, the keys
|
||||
# and values come from an encoder; the attention mask needs to be
|
||||
# such that the encoder's padding tokens are not attended to.
|
||||
is_cross_attention = encoder_hidden_states is not None
|
||||
|
||||
if is_cross_attention:
|
||||
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
|
||||
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
|
||||
attention_mask = encoder_attention_mask
|
||||
elif past_key_value is not None:
|
||||
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
||||
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
||||
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
|
||||
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
|
||||
else:
|
||||
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
||||
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
||||
|
||||
query_layer = self.transpose_for_scores(mixed_query_layer)
|
||||
|
||||
past_key_value = (key_layer, value_layer)
|
||||
|
||||
# Take the dot product between "query" and "key" to get the raw attention scores.
|
||||
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
||||
|
||||
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
||||
seq_length = hidden_states.size()[1]
|
||||
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
|
||||
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
|
||||
distance = position_ids_l - position_ids_r
|
||||
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
|
||||
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
|
||||
|
||||
if self.position_embedding_type == "relative_key":
|
||||
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
||||
attention_scores = attention_scores + relative_position_scores
|
||||
elif self.position_embedding_type == "relative_key_query":
|
||||
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
||||
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
|
||||
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
|
||||
|
||||
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
||||
if attention_mask is not None:
|
||||
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
|
||||
attention_scores = attention_scores + attention_mask
|
||||
|
||||
# Normalize the attention scores to probabilities.
|
||||
attention_probs = nn.Softmax(dim=-1)(attention_scores)
|
||||
|
||||
if is_cross_attention and self.save_attention:
|
||||
self.save_attention_map(attention_probs)
|
||||
attention_probs.register_hook(self.save_attn_gradients)
|
||||
|
||||
# This is actually dropping out entire tokens to attend to, which might
|
||||
# seem a bit unusual, but is taken from the original Transformer paper.
|
||||
attention_probs_dropped = self.dropout(attention_probs)
|
||||
|
||||
# Mask heads if we want to
|
||||
if head_mask is not None:
|
||||
attention_probs_dropped = attention_probs_dropped * head_mask
|
||||
|
||||
context_layer = torch.matmul(attention_probs_dropped, value_layer)
|
||||
|
||||
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
||||
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
||||
context_layer = context_layer.view(*new_context_layer_shape)
|
||||
|
||||
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
||||
|
||||
outputs = outputs + (past_key_value,)
|
||||
return outputs
|
||||
|
||||
|
||||
class BertSelfOutput(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
|
||||
def forward(self, hidden_states, input_tensor):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertAttention(nn.Module):
|
||||
def __init__(self, config, is_cross_attention=False):
|
||||
super().__init__()
|
||||
self.self = BertSelfAttention(config, is_cross_attention)
|
||||
self.output = BertSelfOutput(config)
|
||||
self.pruned_heads = set()
|
||||
|
||||
def prune_heads(self, heads):
|
||||
if len(heads) == 0:
|
||||
return
|
||||
heads, index = find_pruneable_heads_and_indices(
|
||||
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
|
||||
)
|
||||
|
||||
# Prune linear layers
|
||||
self.self.query = prune_linear_layer(self.self.query, index)
|
||||
self.self.key = prune_linear_layer(self.self.key, index)
|
||||
self.self.value = prune_linear_layer(self.self.value, index)
|
||||
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
||||
|
||||
# Update hyper params and store pruned heads
|
||||
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
||||
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
|
||||
self.pruned_heads = self.pruned_heads.union(heads)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_value=None,
|
||||
output_attentions=False,
|
||||
):
|
||||
self_outputs = self.self(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
)
|
||||
attention_output = self.output(self_outputs[0], hidden_states)
|
||||
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
||||
return outputs
|
||||
|
||||
|
||||
class BertIntermediate(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
||||
if isinstance(config.hidden_act, str):
|
||||
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
||||
else:
|
||||
self.intermediate_act_fn = config.hidden_act
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.intermediate_act_fn(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertOutput(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
|
||||
def forward(self, hidden_states, input_tensor):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertLayer(nn.Module):
|
||||
def __init__(self, config, layer_num):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
||||
self.seq_len_dim = 1
|
||||
self.attention = BertAttention(config)
|
||||
self.layer_num = layer_num
|
||||
if self.config.add_cross_attention:
|
||||
self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
|
||||
self.intermediate = BertIntermediate(config)
|
||||
self.output = BertOutput(config)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_value=None,
|
||||
output_attentions=False,
|
||||
mode=None,
|
||||
):
|
||||
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
||||
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
||||
self_attention_outputs = self.attention(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
output_attentions=output_attentions,
|
||||
past_key_value=self_attn_past_key_value,
|
||||
)
|
||||
attention_output = self_attention_outputs[0]
|
||||
|
||||
outputs = self_attention_outputs[1:-1]
|
||||
present_key_value = self_attention_outputs[-1]
|
||||
|
||||
if mode=='multimodal':
|
||||
assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
|
||||
|
||||
cross_attention_outputs = self.crossattention(
|
||||
attention_output,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
output_attentions=output_attentions,
|
||||
)
|
||||
attention_output = cross_attention_outputs[0]
|
||||
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
|
||||
layer_output = apply_chunking_to_forward(
|
||||
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
||||
)
|
||||
outputs = (layer_output,) + outputs
|
||||
|
||||
outputs = outputs + (present_key_value,)
|
||||
|
||||
return outputs
|
||||
|
||||
def feed_forward_chunk(self, attention_output):
|
||||
intermediate_output = self.intermediate(attention_output)
|
||||
layer_output = self.output(intermediate_output, attention_output)
|
||||
return layer_output
|
||||
|
||||
|
||||
class BertEncoder(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
|
||||
self.gradient_checkpointing = False
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_values=None,
|
||||
use_cache=None,
|
||||
output_attentions=False,
|
||||
output_hidden_states=False,
|
||||
return_dict=True,
|
||||
mode='multimodal',
|
||||
):
|
||||
all_hidden_states = () if output_hidden_states else None
|
||||
all_self_attentions = () if output_attentions else None
|
||||
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
||||
|
||||
next_decoder_cache = () if use_cache else None
|
||||
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
layer_module = self.layer[i]
|
||||
if output_hidden_states:
|
||||
all_hidden_states = all_hidden_states + (hidden_states,)
|
||||
|
||||
layer_head_mask = head_mask[i] if head_mask is not None else None
|
||||
past_key_value = past_key_values[i] if past_key_values is not None else None
|
||||
|
||||
if self.gradient_checkpointing and self.training:
|
||||
|
||||
if use_cache:
|
||||
logger.warn(
|
||||
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
||||
)
|
||||
use_cache = False
|
||||
|
||||
def create_custom_forward(module):
|
||||
def custom_forward(*inputs):
|
||||
return module(*inputs, past_key_value, output_attentions)
|
||||
|
||||
return custom_forward
|
||||
|
||||
layer_outputs = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(layer_module),
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
layer_head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
mode=mode,
|
||||
)
|
||||
else:
|
||||
layer_outputs = layer_module(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
layer_head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
mode=mode,
|
||||
)
|
||||
|
||||
hidden_states = layer_outputs[0]
|
||||
if use_cache:
|
||||
next_decoder_cache += (layer_outputs[-1],)
|
||||
if output_attentions:
|
||||
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
||||
|
||||
if output_hidden_states:
|
||||
all_hidden_states = all_hidden_states + (hidden_states,)
|
||||
|
||||
if not return_dict:
|
||||
return tuple(
|
||||
v
|
||||
for v in [
|
||||
hidden_states,
|
||||
next_decoder_cache,
|
||||
all_hidden_states,
|
||||
all_self_attentions,
|
||||
all_cross_attentions,
|
||||
]
|
||||
if v is not None
|
||||
)
|
||||
return BaseModelOutputWithPastAndCrossAttentions(
|
||||
last_hidden_state=hidden_states,
|
||||
past_key_values=next_decoder_cache,
|
||||
hidden_states=all_hidden_states,
|
||||
attentions=all_self_attentions,
|
||||
cross_attentions=all_cross_attentions,
|
||||
)
|
||||
|
||||
|
||||
class BertPooler(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
self.activation = nn.Tanh()
|
||||
|
||||
def forward(self, hidden_states):
|
||||
# We "pool" the model by simply taking the hidden state corresponding
|
||||
# to the first token.
|
||||
first_token_tensor = hidden_states[:, 0]
|
||||
pooled_output = self.dense(first_token_tensor)
|
||||
pooled_output = self.activation(pooled_output)
|
||||
return pooled_output
|
||||
|
||||
|
||||
class BertPredictionHeadTransform(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
if isinstance(config.hidden_act, str):
|
||||
self.transform_act_fn = ACT2FN[config.hidden_act]
|
||||
else:
|
||||
self.transform_act_fn = config.hidden_act
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.transform_act_fn(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertLMPredictionHead(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.transform = BertPredictionHeadTransform(config)
|
||||
|
||||
# The output weights are the same as the input embeddings, but there is
|
||||
# an output-only bias for each token.
|
||||
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
||||
|
||||
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
||||
|
||||
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
||||
self.decoder.bias = self.bias
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.transform(hidden_states)
|
||||
hidden_states = self.decoder(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertOnlyMLMHead(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.predictions = BertLMPredictionHead(config)
|
||||
|
||||
def forward(self, sequence_output):
|
||||
prediction_scores = self.predictions(sequence_output)
|
||||
return prediction_scores
|
||||
|
||||
|
||||
class BertPreTrainedModel(PreTrainedModel):
|
||||
"""
|
||||
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
||||
models.
|
||||
"""
|
||||
|
||||
config_class = BertConfig
|
||||
base_model_prefix = "bert"
|
||||
_keys_to_ignore_on_load_missing = [r"position_ids"]
|
||||
|
||||
def _init_weights(self, module):
|
||||
""" Initialize the weights """
|
||||
if isinstance(module, (nn.Linear, nn.Embedding)):
|
||||
# Slightly different from the TF version which uses truncated_normal for initialization
|
||||
# cf https://github.com/pytorch/pytorch/pull/5617
|
||||
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
||||
elif isinstance(module, nn.LayerNorm):
|
||||
module.bias.data.zero_()
|
||||
module.weight.data.fill_(1.0)
|
||||
if isinstance(module, nn.Linear) and module.bias is not None:
|
||||
module.bias.data.zero_()
|
||||
|
||||
|
||||
class BertModel(BertPreTrainedModel):
|
||||
"""
|
||||
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
|
||||
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
|
||||
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
|
||||
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
|
||||
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
|
||||
input to the forward pass.
|
||||
"""
|
||||
|
||||
def __init__(self, config, add_pooling_layer=True):
|
||||
super().__init__(config)
|
||||
self.config = config
|
||||
|
||||
self.embeddings = BertEmbeddings(config)
|
||||
|
||||
self.encoder = BertEncoder(config)
|
||||
|
||||
self.pooler = BertPooler(config) if add_pooling_layer else None
|
||||
|
||||
self.init_weights()
|
||||
|
||||
|
||||
def get_input_embeddings(self):
|
||||
return self.embeddings.word_embeddings
|
||||
|
||||
def set_input_embeddings(self, value):
|
||||
self.embeddings.word_embeddings = value
|
||||
|
||||
def _prune_heads(self, heads_to_prune):
|
||||
"""
|
||||
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
||||
class PreTrainedModel
|
||||
"""
|
||||
for layer, heads in heads_to_prune.items():
|
||||
self.encoder.layer[layer].attention.prune_heads(heads)
|
||||
|
||||
|
||||
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
|
||||
"""
|
||||
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
||||
|
||||
Arguments:
|
||||
attention_mask (:obj:`torch.Tensor`):
|
||||
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
||||
input_shape (:obj:`Tuple[int]`):
|
||||
The shape of the input to the model.
|
||||
device: (:obj:`torch.device`):
|
||||
The device of the input to the model.
|
||||
|
||||
Returns:
|
||||
:obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
|
||||
"""
|
||||
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
||||
# ourselves in which case we just need to make it broadcastable to all heads.
|
||||
if attention_mask.dim() == 3:
|
||||
extended_attention_mask = attention_mask[:, None, :, :]
|
||||
elif attention_mask.dim() == 2:
|
||||
# Provided a padding mask of dimensions [batch_size, seq_length]
|
||||
# - if the model is a decoder, apply a causal mask in addition to the padding mask
|
||||
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
||||
if is_decoder:
|
||||
batch_size, seq_length = input_shape
|
||||
|
||||
seq_ids = torch.arange(seq_length, device=device)
|
||||
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
|
||||
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
|
||||
# causal and attention masks must have same type with pytorch version < 1.3
|
||||
causal_mask = causal_mask.to(attention_mask.dtype)
|
||||
|
||||
if causal_mask.shape[1] < attention_mask.shape[1]:
|
||||
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
|
||||
causal_mask = torch.cat(
|
||||
[
|
||||
torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
|
||||
causal_mask,
|
||||
],
|
||||
axis=-1,
|
||||
)
|
||||
|
||||
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
|
||||
else:
|
||||
extended_attention_mask = attention_mask[:, None, None, :]
|
||||
else:
|
||||
raise ValueError(
|
||||
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
|
||||
input_shape, attention_mask.shape
|
||||
)
|
||||
)
|
||||
|
||||
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
||||
# masked positions, this operation will create a tensor which is 0.0 for
|
||||
# positions we want to attend and -10000.0 for masked positions.
|
||||
# Since we are adding it to the raw scores before the softmax, this is
|
||||
# effectively the same as removing these entirely.
|
||||
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
||||
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
||||
return extended_attention_mask
|
||||
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
encoder_embeds=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_values=None,
|
||||
use_cache=None,
|
||||
output_attentions=None,
|
||||
output_hidden_states=None,
|
||||
return_dict=None,
|
||||
is_decoder=False,
|
||||
mode='multimodal',
|
||||
):
|
||||
r"""
|
||||
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
||||
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
||||
the model is configured as a decoder.
|
||||
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
||||
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
||||
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
||||
- 1 for tokens that are **not masked**,
|
||||
- 0 for tokens that are **masked**.
|
||||
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
||||
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
||||
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
||||
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
||||
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
||||
use_cache (:obj:`bool`, `optional`):
|
||||
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
||||
decoding (see :obj:`past_key_values`).
|
||||
"""
|
||||
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
||||
output_hidden_states = (
|
||||
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
||||
)
|
||||
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
||||
|
||||
if is_decoder:
|
||||
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
||||
else:
|
||||
use_cache = False
|
||||
|
||||
if input_ids is not None and inputs_embeds is not None:
|
||||
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
||||
elif input_ids is not None:
|
||||
input_shape = input_ids.size()
|
||||
batch_size, seq_length = input_shape
|
||||
device = input_ids.device
|
||||
elif inputs_embeds is not None:
|
||||
input_shape = inputs_embeds.size()[:-1]
|
||||
batch_size, seq_length = input_shape
|
||||
device = inputs_embeds.device
|
||||
elif encoder_embeds is not None:
|
||||
input_shape = encoder_embeds.size()[:-1]
|
||||
batch_size, seq_length = input_shape
|
||||
device = encoder_embeds.device
|
||||
else:
|
||||
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
|
||||
|
||||
# past_key_values_length
|
||||
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
||||
|
||||
if attention_mask is None:
|
||||
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
||||
|
||||
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
||||
# ourselves in which case we just need to make it broadcastable to all heads.
|
||||
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
|
||||
device, is_decoder)
|
||||
|
||||
# If a 2D or 3D attention mask is provided for the cross-attention
|
||||
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
||||
if encoder_hidden_states is not None:
|
||||
if type(encoder_hidden_states) == list:
|
||||
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
|
||||
else:
|
||||
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
||||
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
||||
|
||||
if type(encoder_attention_mask) == list:
|
||||
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
|
||||
elif encoder_attention_mask is None:
|
||||
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
||||
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
||||
else:
|
||||
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
||||
else:
|
||||
encoder_extended_attention_mask = None
|
||||
|
||||
# Prepare head mask if needed
|
||||
# 1.0 in head_mask indicate we keep the head
|
||||
# attention_probs has shape bsz x n_heads x N x N
|
||||
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
||||
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
||||
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
||||
|
||||
if encoder_embeds is None:
|
||||
embedding_output = self.embeddings(
|
||||
input_ids=input_ids,
|
||||
position_ids=position_ids,
|
||||
inputs_embeds=inputs_embeds,
|
||||
past_key_values_length=past_key_values_length,
|
||||
)
|
||||
else:
|
||||
embedding_output = encoder_embeds
|
||||
|
||||
encoder_outputs = self.encoder(
|
||||
embedding_output,
|
||||
attention_mask=extended_attention_mask,
|
||||
head_mask=head_mask,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
encoder_attention_mask=encoder_extended_attention_mask,
|
||||
past_key_values=past_key_values,
|
||||
use_cache=use_cache,
|
||||
output_attentions=output_attentions,
|
||||
output_hidden_states=output_hidden_states,
|
||||
return_dict=return_dict,
|
||||
mode=mode,
|
||||
)
|
||||
sequence_output = encoder_outputs[0]
|
||||
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
||||
|
||||
if not return_dict:
|
||||
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
||||
|
||||
return BaseModelOutputWithPoolingAndCrossAttentions(
|
||||
last_hidden_state=sequence_output,
|
||||
pooler_output=pooled_output,
|
||||
past_key_values=encoder_outputs.past_key_values,
|
||||
hidden_states=encoder_outputs.hidden_states,
|
||||
attentions=encoder_outputs.attentions,
|
||||
cross_attentions=encoder_outputs.cross_attentions,
|
||||
)
|
||||
|
||||
|
||||
|
||||
class BertLMHeadModel(BertPreTrainedModel):
|
||||
|
||||
_keys_to_ignore_on_load_unexpected = [r"pooler"]
|
||||
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
|
||||
|
||||
def __init__(self, config):
|
||||
super().__init__(config)
|
||||
|
||||
self.bert = BertModel(config, add_pooling_layer=False)
|
||||
self.cls = BertOnlyMLMHead(config)
|
||||
|
||||
self.init_weights()
|
||||
|
||||
def get_output_embeddings(self):
|
||||
return self.cls.predictions.decoder
|
||||
|
||||
def set_output_embeddings(self, new_embeddings):
|
||||
self.cls.predictions.decoder = new_embeddings
|
||||
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
labels=None,
|
||||
past_key_values=None,
|
||||
use_cache=None,
|
||||
output_attentions=None,
|
||||
output_hidden_states=None,
|
||||
return_dict=None,
|
||||
return_logits=False,
|
||||
is_decoder=True,
|
||||
reduction='mean',
|
||||
mode='multimodal',
|
||||
):
|
||||
r"""
|
||||
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
||||
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
||||
the model is configured as a decoder.
|
||||
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
||||
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
||||
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
||||
- 1 for tokens that are **not masked**,
|
||||
- 0 for tokens that are **masked**.
|
||||
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
||||
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
|
||||
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
|
||||
ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
|
||||
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
||||
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
||||
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
||||
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
||||
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
||||
use_cache (:obj:`bool`, `optional`):
|
||||
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
||||
decoding (see :obj:`past_key_values`).
|
||||
Returns:
|
||||
Example::
|
||||
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
|
||||
>>> import torch
|
||||
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
|
||||
>>> config = BertConfig.from_pretrained("bert-base-cased")
|
||||
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
|
||||
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
>>> prediction_logits = outputs.logits
|
||||
"""
|
||||
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
||||
if labels is not None:
|
||||
use_cache = False
|
||||
|
||||
outputs = self.bert(
|
||||
input_ids,
|
||||
attention_mask=attention_mask,
|
||||
position_ids=position_ids,
|
||||
head_mask=head_mask,
|
||||
inputs_embeds=inputs_embeds,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
encoder_attention_mask=encoder_attention_mask,
|
||||
past_key_values=past_key_values,
|
||||
use_cache=use_cache,
|
||||
output_attentions=output_attentions,
|
||||
output_hidden_states=output_hidden_states,
|
||||
return_dict=return_dict,
|
||||
is_decoder=is_decoder,
|
||||
mode=mode,
|
||||
)
|
||||
|
||||
sequence_output = outputs[0]
|
||||
prediction_scores = self.cls(sequence_output)
|
||||
|
||||
if return_logits:
|
||||
return prediction_scores[:, :-1, :].contiguous()
|
||||
|
||||
lm_loss = None
|
||||
if labels is not None:
|
||||
# we are doing next-token prediction; shift prediction scores and input ids by one
|
||||
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
|
||||
labels = labels[:, 1:].contiguous()
|
||||
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
|
||||
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
|
||||
if reduction=='none':
|
||||
lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)
|
||||
|
||||
if not return_dict:
|
||||
output = (prediction_scores,) + outputs[2:]
|
||||
return ((lm_loss,) + output) if lm_loss is not None else output
|
||||
|
||||
return CausalLMOutputWithCrossAttentions(
|
||||
loss=lm_loss,
|
||||
logits=prediction_scores,
|
||||
past_key_values=outputs.past_key_values,
|
||||
hidden_states=outputs.hidden_states,
|
||||
attentions=outputs.attentions,
|
||||
cross_attentions=outputs.cross_attentions,
|
||||
)
|
||||
|
||||
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
|
||||
input_shape = input_ids.shape
|
||||
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
|
||||
if attention_mask is None:
|
||||
attention_mask = input_ids.new_ones(input_shape)
|
||||
|
||||
# cut decoder_input_ids if past is used
|
||||
if past is not None:
|
||||
input_ids = input_ids[:, -1:]
|
||||
|
||||
return {
|
||||
"input_ids": input_ids,
|
||||
"attention_mask": attention_mask,
|
||||
"past_key_values": past,
|
||||
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
|
||||
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
|
||||
"is_decoder": True,
|
||||
}
|
||||
|
||||
def _reorder_cache(self, past, beam_idx):
|
||||
reordered_past = ()
|
||||
for layer_past in past:
|
||||
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
|
||||
return reordered_past
|
||||
423
custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module.py
Normal file
@@ -0,0 +1,423 @@
|
||||
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
'''
|
||||
import warnings
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
from .blip_vit import VisionTransformer, interpolate_pos_embed
|
||||
from .blip_med import BertConfig, BertModel, BertLMHeadModel
|
||||
from transformers import BertTokenizer
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
from timm.models.hub import download_cached_file
|
||||
import numpy as np
|
||||
|
||||
from pathlib import Path
|
||||
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# BLIP
|
||||
|
||||
class BLIP_Base(nn.Module):
|
||||
def __init__(self,
|
||||
med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
|
||||
image_size = 224,
|
||||
vit = 'base',
|
||||
vit_grad_ckpt = False,
|
||||
vit_ckpt_layer = 0,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
med_config (str): path for the mixture of encoder-decoder model's configuration file
|
||||
image_size (int): input image size
|
||||
vit (str): model size of vision transformer
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
|
||||
self.tokenizer = init_tokenizer()
|
||||
med_config = BertConfig.from_json_file(med_config)
|
||||
med_config.encoder_width = vision_width
|
||||
self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)
|
||||
|
||||
|
||||
def forward(self, image, caption, mode):
|
||||
|
||||
assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
|
||||
text = self.tokenizer(caption, return_tensors="pt").to(image.device)
|
||||
|
||||
if mode=='image':
|
||||
# return image features
|
||||
image_embeds = self.visual_encoder(image)
|
||||
return image_embeds
|
||||
|
||||
elif mode=='text':
|
||||
# return text features
|
||||
text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
|
||||
return_dict = True, mode = 'text')
|
||||
return text_output.last_hidden_state
|
||||
|
||||
elif mode=='multimodal':
|
||||
# return multimodal features
|
||||
image_embeds = self.visual_encoder(image)
|
||||
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
||||
|
||||
text.input_ids[:,0] = self.tokenizer.enc_token_id
|
||||
output = self.text_encoder(text.input_ids,
|
||||
attention_mask = text.attention_mask,
|
||||
encoder_hidden_states = image_embeds,
|
||||
encoder_attention_mask = image_atts,
|
||||
return_dict = True,
|
||||
)
|
||||
return output.last_hidden_state
|
||||
|
||||
|
||||
|
||||
class BLIP_Decoder(nn.Module):
|
||||
def __init__(self,
|
||||
med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
|
||||
image_size = 384,
|
||||
vit = 'base',
|
||||
vit_grad_ckpt = False,
|
||||
vit_ckpt_layer = 0,
|
||||
prompt = 'a picture of ',
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
med_config (str): path for the mixture of encoder-decoder model's configuration file
|
||||
image_size (int): input image size
|
||||
vit (str): model size of vision transformer
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
|
||||
self.tokenizer = init_tokenizer()
|
||||
med_config = BertConfig.from_json_file(med_config)
|
||||
med_config.encoder_width = vision_width
|
||||
self.text_decoder = BertLMHeadModel(config=med_config)
|
||||
|
||||
self.prompt = prompt
|
||||
self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1
|
||||
|
||||
|
||||
def forward(self, image, caption):
|
||||
|
||||
image_embeds = self.visual_encoder(image)
|
||||
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
||||
|
||||
text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)
|
||||
|
||||
text.input_ids[:,0] = self.tokenizer.bos_token_id
|
||||
|
||||
decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
|
||||
decoder_targets[:,:self.prompt_length] = -100
|
||||
|
||||
decoder_output = self.text_decoder(text.input_ids,
|
||||
attention_mask = text.attention_mask,
|
||||
encoder_hidden_states = image_embeds,
|
||||
encoder_attention_mask = image_atts,
|
||||
labels = decoder_targets,
|
||||
return_dict = True,
|
||||
)
|
||||
loss_lm = decoder_output.loss
|
||||
|
||||
return loss_lm
|
||||
|
||||
def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
|
||||
image_embeds = self.visual_encoder(image)
|
||||
|
||||
if not sample:
|
||||
image_embeds = image_embeds.repeat_interleave(num_beams,dim=0)
|
||||
|
||||
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
||||
model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts}
|
||||
|
||||
prompt = [self.prompt] * image.size(0)
|
||||
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
|
||||
input_ids[:,0] = self.tokenizer.bos_token_id
|
||||
input_ids = input_ids[:, :-1]
|
||||
|
||||
if sample:
|
||||
#nucleus sampling
|
||||
outputs = self.text_decoder.generate(input_ids=input_ids,
|
||||
max_length=max_length,
|
||||
min_length=min_length,
|
||||
do_sample=True,
|
||||
top_p=top_p,
|
||||
num_return_sequences=1,
|
||||
eos_token_id=self.tokenizer.sep_token_id,
|
||||
pad_token_id=self.tokenizer.pad_token_id,
|
||||
repetition_penalty=1.1,
|
||||
**model_kwargs)
|
||||
else:
|
||||
#beam search
|
||||
outputs = self.text_decoder.generate(input_ids=input_ids,
|
||||
max_length=max_length,
|
||||
min_length=min_length,
|
||||
num_beams=num_beams,
|
||||
eos_token_id=self.tokenizer.sep_token_id,
|
||||
pad_token_id=self.tokenizer.pad_token_id,
|
||||
repetition_penalty=repetition_penalty,
|
||||
**model_kwargs)
|
||||
|
||||
captions = []
|
||||
for output in outputs:
|
||||
caption = self.tokenizer.decode(output, skip_special_tokens=True)
|
||||
captions.append(caption[len(self.prompt):])
|
||||
return captions
|
||||
|
||||
|
||||
def blip_decoder(pretrained='',**kwargs):
|
||||
model = BLIP_Decoder(**kwargs)
|
||||
if pretrained:
|
||||
model,msg = load_checkpoint(model,pretrained)
|
||||
assert(len(msg.missing_keys)==0)
|
||||
return model
|
||||
|
||||
def blip_feature_extractor(pretrained='',**kwargs):
|
||||
model = BLIP_Base(**kwargs)
|
||||
if pretrained:
|
||||
model,msg = load_checkpoint(model,pretrained)
|
||||
assert(len(msg.missing_keys)==0)
|
||||
return model
|
||||
|
||||
def init_tokenizer():
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
tokenizer.add_special_tokens({'bos_token':'[DEC]'})
|
||||
tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']})
|
||||
tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
|
||||
return tokenizer
|
||||
|
||||
|
||||
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
|
||||
|
||||
assert vit in ['base', 'large'], "vit parameter must be base or large"
|
||||
if vit=='base':
|
||||
vision_width = 768
|
||||
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
|
||||
num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
|
||||
drop_path_rate=0 or drop_path_rate  # "0 or x" evaluates to x, so the caller's drop_path_rate is passed through
|
||||
)
|
||||
elif vit=='large':
|
||||
vision_width = 1024
|
||||
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
|
||||
num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
|
||||
drop_path_rate=0.1 or drop_path_rate  # "0.1 or x" is always 0.1, so the large model uses a fixed drop-path rate
|
||||
)
|
||||
return visual_encoder, vision_width
|
||||
|
||||
def is_url(url_or_filename):
|
||||
parsed = urlparse(url_or_filename)
|
||||
return parsed.scheme in ("http", "https")
|
||||
|
||||
def load_checkpoint(model,url_or_filename):
|
||||
if is_url(url_or_filename):
|
||||
cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
|
||||
checkpoint = torch.load(cached_file, map_location='cpu')
|
||||
elif os.path.isfile(url_or_filename):
|
||||
checkpoint = torch.load(url_or_filename, map_location='cpu')
|
||||
else:
|
||||
raise RuntimeError('checkpoint url or path is invalid')
|
||||
|
||||
state_dict = checkpoint['model']
|
||||
|
||||
state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder)
|
||||
if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
|
||||
state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
|
||||
model.visual_encoder_m)
|
||||
for key in model.state_dict().keys():
|
||||
if key in state_dict.keys():
|
||||
if state_dict[key].shape!=model.state_dict()[key].shape:
|
||||
del state_dict[key]
|
||||
|
||||
msg = model.load_state_dict(state_dict,strict=False)
|
||||
print('load checkpoint from %s'%url_or_filename)
|
||||
return model,msg
|
||||
|
||||
# BLIP VQA
|
||||
|
||||
class BLIP_VQA(nn.Module):
|
||||
def __init__(self,
|
||||
med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
|
||||
image_size = 480,
|
||||
vit = 'base',
|
||||
vit_grad_ckpt = False,
|
||||
vit_ckpt_layer = 0,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
med_config (str): path for the mixture of encoder-decoder model's configuration file
|
||||
image_size (int): input image size
|
||||
vit (str): model size of vision transformer
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
|
||||
self.tokenizer = init_tokenizer()
|
||||
|
||||
encoder_config = BertConfig.from_json_file(med_config)
|
||||
encoder_config.encoder_width = vision_width
|
||||
self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False)
|
||||
|
||||
decoder_config = BertConfig.from_json_file(med_config)
|
||||
self.text_decoder = BertLMHeadModel(config=decoder_config)
|
||||
|
||||
|
||||
def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128):
|
||||
|
||||
image_embeds = self.visual_encoder(image)
|
||||
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
||||
|
||||
question = self.tokenizer(question, padding='longest', truncation=True, max_length=35,
|
||||
return_tensors="pt").to(image.device)
|
||||
question.input_ids[:,0] = self.tokenizer.enc_token_id
|
||||
|
||||
if train:
|
||||
'''
|
||||
n: number of answers for each question
|
||||
weights: weight for each answer
|
||||
'''
|
||||
answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device)
|
||||
answer.input_ids[:,0] = self.tokenizer.bos_token_id
|
||||
answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100)
|
||||
|
||||
question_output = self.text_encoder(question.input_ids,
|
||||
attention_mask = question.attention_mask,
|
||||
encoder_hidden_states = image_embeds,
|
||||
encoder_attention_mask = image_atts,
|
||||
return_dict = True)
|
||||
|
||||
question_states = []
|
||||
question_atts = []
|
||||
for b, n in enumerate(n):
|
||||
question_states += [question_output.last_hidden_state[b]]*n
|
||||
question_atts += [question.attention_mask[b]]*n
|
||||
question_states = torch.stack(question_states,0)
|
||||
question_atts = torch.stack(question_atts,0)
|
||||
|
||||
answer_output = self.text_decoder(answer.input_ids,
|
||||
attention_mask = answer.attention_mask,
|
||||
encoder_hidden_states = question_states,
|
||||
encoder_attention_mask = question_atts,
|
||||
labels = answer_targets,
|
||||
return_dict = True,
|
||||
reduction = 'none',
|
||||
)
|
||||
|
||||
loss = weights * answer_output.loss
|
||||
loss = loss.sum()/image.size(0)
|
||||
|
||||
return loss
|
||||
|
||||
|
||||
else:
|
||||
question_output = self.text_encoder(question.input_ids,
|
||||
attention_mask = question.attention_mask,
|
||||
encoder_hidden_states = image_embeds,
|
||||
encoder_attention_mask = image_atts,
|
||||
return_dict = True)
|
||||
|
||||
if inference=='generate':
|
||||
num_beams = 3
|
||||
question_states = question_output.last_hidden_state.repeat_interleave(num_beams,dim=0)
|
||||
question_atts = torch.ones(question_states.size()[:-1],dtype=torch.long).to(question_states.device)
|
||||
model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask":question_atts}
|
||||
|
||||
bos_ids = torch.full((image.size(0),1),fill_value=self.tokenizer.bos_token_id,device=image.device)
|
||||
|
||||
outputs = self.text_decoder.generate(input_ids=bos_ids,
|
||||
max_length=10,
|
||||
min_length=1,
|
||||
num_beams=num_beams,
|
||||
eos_token_id=self.tokenizer.sep_token_id,
|
||||
pad_token_id=self.tokenizer.pad_token_id,
|
||||
**model_kwargs)
|
||||
|
||||
answers = []
|
||||
for output in outputs:
|
||||
answer = self.tokenizer.decode(output, skip_special_tokens=True)
|
||||
answers.append(answer)
|
||||
return answers
|
||||
|
||||
elif inference=='rank':
|
||||
max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask,
|
||||
answer.input_ids, answer.attention_mask, k_test)
|
||||
return max_ids
|
||||
|
||||
|
||||
|
||||
def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):
|
||||
|
||||
num_ques = question_states.size(0)
|
||||
start_ids = answer_ids[0,0].repeat(num_ques,1) # bos token
|
||||
|
||||
start_output = self.text_decoder(start_ids,
|
||||
encoder_hidden_states = question_states,
|
||||
encoder_attention_mask = question_atts,
|
||||
return_dict = True,
|
||||
reduction = 'none')
|
||||
logits = start_output.logits[:,0,:] # first token's logit
|
||||
|
||||
# topk_probs: top-k probability
|
||||
# topk_ids: [num_question, k]
|
||||
answer_first_token = answer_ids[:,1]
|
||||
prob_first_token = F.softmax(logits,dim=1).index_select(dim=1, index=answer_first_token)
|
||||
topk_probs, topk_ids = prob_first_token.topk(k,dim=1)
|
||||
|
||||
# answer input: [num_question*k, answer_len]
|
||||
input_ids = []
|
||||
input_atts = []
|
||||
for b, topk_id in enumerate(topk_ids):
|
||||
input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
|
||||
input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
|
||||
input_ids = torch.cat(input_ids,dim=0)
|
||||
input_atts = torch.cat(input_atts,dim=0)
|
||||
|
||||
targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)
|
||||
|
||||
# repeat encoder's output for top-k answers
|
||||
question_states = tile(question_states, 0, k)
|
||||
question_atts = tile(question_atts, 0, k)
|
||||
|
||||
output = self.text_decoder(input_ids,
|
||||
attention_mask = input_atts,
|
||||
encoder_hidden_states = question_states,
|
||||
encoder_attention_mask = question_atts,
|
||||
labels = targets_ids,
|
||||
return_dict = True,
|
||||
reduction = 'none')
|
||||
|
||||
log_probs_sum = -output.loss
|
||||
log_probs_sum = log_probs_sum.view(num_ques,k)
|
||||
|
||||
max_topk_ids = log_probs_sum.argmax(dim=1)
|
||||
max_ids = topk_ids[max_topk_ids>=0,max_topk_ids]
|
||||
|
||||
return max_ids
|
||||
|
||||
|
||||
def blip_vqa(pretrained='',**kwargs):
|
||||
model = BLIP_VQA(**kwargs)
|
||||
if pretrained:
|
||||
model,msg = load_checkpoint(model,pretrained)
|
||||
# assert(len(msg.missing_keys)==0)
|
||||
return model
|
||||
|
||||
|
||||
def tile(x, dim, n_tile):
|
||||
init_dim = x.size(dim)
|
||||
repeat_idx = [1] * x.dim()
|
||||
repeat_idx[dim] = n_tile
|
||||
x = x.repeat(*(repeat_idx))
|
||||
order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
|
||||
return torch.index_select(x, dim, order_index.to(x.device))
|
||||
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
Copyright (c) 2022, Salesforce.com, Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
305
custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_vit.py
Normal file
@@ -0,0 +1,305 @@
|
||||
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
* Based on timm code base
|
||||
* https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
||||
'''
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from functools import partial
|
||||
|
||||
from timm.models.vision_transformer import _cfg, PatchEmbed
|
||||
from timm.models.registry import register_model
|
||||
from timm.models.layers import trunc_normal_, DropPath
|
||||
from timm.models.helpers import named_apply, adapt_input_conv
|
||||
|
||||
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
|
||||
|
||||
class Mlp(nn.Module):
|
||||
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
|
||||
"""
|
||||
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
||||
super().__init__()
|
||||
out_features = out_features or in_features
|
||||
hidden_features = hidden_features or in_features
|
||||
self.fc1 = nn.Linear(in_features, hidden_features)
|
||||
self.act = act_layer()
|
||||
self.fc2 = nn.Linear(hidden_features, out_features)
|
||||
self.drop = nn.Dropout(drop)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.fc1(x)
|
||||
x = self.act(x)
|
||||
x = self.drop(x)
|
||||
x = self.fc2(x)
|
||||
x = self.drop(x)
|
||||
return x
|
||||
|
||||
|
||||
class Attention(nn.Module):
|
||||
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
head_dim = dim // num_heads
|
||||
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
|
||||
self.scale = qk_scale or head_dim ** -0.5
|
||||
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
||||
self.attn_drop = nn.Dropout(attn_drop)
|
||||
self.proj = nn.Linear(dim, dim)
|
||||
self.proj_drop = nn.Dropout(proj_drop)
|
||||
self.attn_gradients = None
|
||||
self.attention_map = None
|
||||
|
||||
def save_attn_gradients(self, attn_gradients):
|
||||
self.attn_gradients = attn_gradients
|
||||
|
||||
def get_attn_gradients(self):
|
||||
return self.attn_gradients
|
||||
|
||||
def save_attention_map(self, attention_map):
|
||||
self.attention_map = attention_map
|
||||
|
||||
def get_attention_map(self):
|
||||
return self.attention_map
|
||||
|
||||
def forward(self, x, register_hook=False):
|
||||
B, N, C = x.shape
|
||||
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
||||
|
||||
attn = (q @ k.transpose(-2, -1)) * self.scale
|
||||
attn = attn.softmax(dim=-1)
|
||||
attn = self.attn_drop(attn)
|
||||
|
||||
if register_hook:
|
||||
self.save_attention_map(attn)
|
||||
attn.register_hook(self.save_attn_gradients)
|
||||
|
||||
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
|
||||
x = self.proj(x)
|
||||
x = self.proj_drop(x)
|
||||
return x
|
||||
|
||||
|
||||
class Block(nn.Module):
|
||||
|
||||
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
||||
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False):
|
||||
super().__init__()
|
||||
self.norm1 = norm_layer(dim)
|
||||
self.attn = Attention(
|
||||
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
|
||||
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
|
||||
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.norm2 = norm_layer(dim)
|
||||
mlp_hidden_dim = int(dim * mlp_ratio)
|
||||
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
||||
|
||||
if use_grad_checkpointing:
|
||||
self.attn = checkpoint_wrapper(self.attn)
|
||||
self.mlp = checkpoint_wrapper(self.mlp)
|
||||
|
||||
def forward(self, x, register_hook=False):
|
||||
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
|
||||
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
||||
return x
|
||||
|
||||
|
||||
class VisionTransformer(nn.Module):
|
||||
""" Vision Transformer
|
||||
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
|
||||
https://arxiv.org/abs/2010.11929
|
||||
"""
|
||||
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
|
||||
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
|
||||
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
|
||||
use_grad_checkpointing=False, ckpt_layer=0):
|
||||
"""
|
||||
Args:
|
||||
img_size (int, tuple): input image size
|
||||
patch_size (int, tuple): patch size
|
||||
in_chans (int): number of input channels
|
||||
num_classes (int): number of classes for classification head
|
||||
embed_dim (int): embedding dimension
|
||||
depth (int): depth of transformer
|
||||
num_heads (int): number of attention heads
|
||||
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
|
||||
qkv_bias (bool): enable bias for qkv if True
|
||||
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
|
||||
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
|
||||
drop_rate (float): dropout rate
|
||||
attn_drop_rate (float): attention dropout rate
|
||||
drop_path_rate (float): stochastic depth rate
|
||||
norm_layer: (nn.Module): normalization layer
|
||||
"""
|
||||
super().__init__()
|
||||
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
|
||||
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
|
||||
|
||||
num_patches = self.patch_embed.num_patches
|
||||
|
||||
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
||||
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
|
||||
self.pos_drop = nn.Dropout(p=drop_rate)
|
||||
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
||||
self.blocks = nn.ModuleList([
|
||||
Block(
|
||||
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
||||
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
|
||||
use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
|
||||
)
|
||||
for i in range(depth)])
|
||||
self.norm = norm_layer(embed_dim)
|
||||
|
||||
trunc_normal_(self.pos_embed, std=.02)
|
||||
trunc_normal_(self.cls_token, std=.02)
|
||||
self.apply(self._init_weights)
|
||||
|
||||
def _init_weights(self, m):
|
||||
if isinstance(m, nn.Linear):
|
||||
trunc_normal_(m.weight, std=.02)
|
||||
if isinstance(m, nn.Linear) and m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
||||
|
||||
@torch.jit.ignore
|
||||
def no_weight_decay(self):
|
||||
return {'pos_embed', 'cls_token'}
|
||||
|
||||
def forward(self, x, register_blk=-1):
|
||||
B = x.shape[0]
|
||||
x = self.patch_embed(x)
|
||||
|
||||
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
||||
x = torch.cat((cls_tokens, x), dim=1)
|
||||
|
||||
x = x + self.pos_embed[:,:x.size(1),:]
|
||||
x = self.pos_drop(x)
|
||||
|
||||
for i,blk in enumerate(self.blocks):
|
||||
x = blk(x, register_blk==i)
|
||||
x = self.norm(x)
|
||||
|
||||
return x
|
||||
|
||||
@torch.jit.ignore()
|
||||
def load_pretrained(self, checkpoint_path, prefix=''):
|
||||
_load_weights(self, checkpoint_path, prefix)
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
|
||||
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def _n2p(w, t=True):
|
||||
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
|
||||
w = w.flatten()
|
||||
if t:
|
||||
if w.ndim == 4:
|
||||
w = w.transpose([3, 2, 0, 1])
|
||||
elif w.ndim == 3:
|
||||
w = w.transpose([2, 0, 1])
|
||||
elif w.ndim == 2:
|
||||
w = w.transpose([1, 0])
|
||||
return torch.from_numpy(w)
|
||||
|
||||
w = np.load(checkpoint_path)
|
||||
if not prefix and 'opt/target/embedding/kernel' in w:
|
||||
prefix = 'opt/target/'
|
||||
|
||||
if hasattr(model.patch_embed, 'backbone'):
|
||||
# hybrid
|
||||
backbone = model.patch_embed.backbone
|
||||
stem_only = not hasattr(backbone, 'stem')
|
||||
stem = backbone if stem_only else backbone.stem
|
||||
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
|
||||
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
|
||||
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
|
||||
if not stem_only:
|
||||
for i, stage in enumerate(backbone.stages):
|
||||
for j, block in enumerate(stage.blocks):
|
||||
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
|
||||
for r in range(3):
|
||||
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
|
||||
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
|
||||
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
|
||||
if block.downsample is not None:
|
||||
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
|
||||
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
|
||||
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
|
||||
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
|
||||
else:
|
||||
embed_conv_w = adapt_input_conv(
|
||||
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
|
||||
model.patch_embed.proj.weight.copy_(embed_conv_w)
|
||||
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
|
||||
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
|
||||
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
|
||||
if pos_embed_w.shape != model.pos_embed.shape:
|
||||
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
|
||||
pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
|
||||
model.pos_embed.copy_(pos_embed_w)
|
||||
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
|
||||
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
|
||||
# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
|
||||
# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
|
||||
# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
|
||||
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
|
||||
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
|
||||
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
|
||||
for i, block in enumerate(model.blocks.children()):
|
||||
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
|
||||
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
|
||||
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
|
||||
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
|
||||
block.attn.qkv.weight.copy_(torch.cat([
|
||||
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
|
||||
block.attn.qkv.bias.copy_(torch.cat([
|
||||
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
|
||||
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
|
||||
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
|
||||
for r in range(2):
|
||||
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
|
||||
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
|
||||
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
|
||||
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
|
||||
|
||||
|
||||
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
|
||||
# interpolate position embedding
|
||||
embedding_size = pos_embed_checkpoint.shape[-1]
|
||||
num_patches = visual_encoder.patch_embed.num_patches
|
||||
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
|
||||
# height (== width) for the checkpoint position embedding
|
||||
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
|
||||
# height (== width) for the new position embedding
|
||||
new_size = int(num_patches ** 0.5)
|
||||
|
||||
if orig_size!=new_size:
|
||||
# class_token and dist_token are kept unchanged
|
||||
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
|
||||
# only the position tokens are interpolated
|
||||
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
|
||||
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
|
||||
pos_tokens = torch.nn.functional.interpolate(
|
||||
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
|
||||
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
|
||||
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
|
||||
print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
|
||||
|
||||
return new_pos_embed
|
||||
else:
|
||||
return pos_embed_checkpoint
|
||||
15
custom_nodes/was-node-suite-comfyui/pyproject.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[project]
|
||||
name = "was-node-suite-comfyui"
|
||||
description = ""
|
||||
version = "1.0.2"
|
||||
license = { file = "LICENSE" }
|
||||
dependencies = ["cmake", "fairscale>=0.4.4", "git+https://github.com/WASasquatch/img2texture.git", "git+https://github.com/WASasquatch/cstr", "gitpython", "imageio", "joblib", "matplotlib", "numba", "numpy", "opencv-python-headless[ffmpeg]<=4.7.0.72", "pilgram", "git+https://github.com/WASasquatch/ffmpy.git", "rembg", "scikit-image>=0.20.0", "scikit-learn", "scipy", "timm>=0.4.12", "tqdm", "transformers"]
|
||||
|
||||
[project.urls]
|
||||
Repository = "https://github.com/WASasquatch/was-node-suite-comfyui"
|
||||
# Used by Comfy Registry https://comfyregistry.org
|
||||
|
||||
[tool.comfy]
|
||||
PublisherId = "was"
|
||||
DisplayName = "WAS Node Suite"
|
||||
Icon = ""
|
||||
42
custom_nodes/was-node-suite-comfyui/repos/SAM/.gitignore
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
.nfs*
|
||||
|
||||
# compilation and distribution
|
||||
__pycache__
|
||||
_ext
|
||||
*.pyc
|
||||
*.pyd
|
||||
*.so
|
||||
*.dll
|
||||
*.egg-info/
|
||||
build/
|
||||
dist/
|
||||
wheels/
|
||||
|
||||
# pytorch/python/numpy formats
|
||||
*.pth
|
||||
*.pkl
|
||||
*.npy
|
||||
*.ts
|
||||
model_ts*.txt
|
||||
|
||||
# onnx models
|
||||
*.onnx
|
||||
|
||||
# ipython/jupyter notebooks
|
||||
**/.ipynb_checkpoints/
|
||||
|
||||
# Editor temporaries
|
||||
*.swn
|
||||
*.swo
|
||||
*.swp
|
||||
*~
|
||||
|
||||
# editor settings
|
||||
.idea
|
||||
.vscode
|
||||
_darcs
|
||||
|
||||
# demo
|
||||
**/node_modules
|
||||
yarn.lock
|
||||
package-lock.json
|
||||
@@ -0,0 +1,80 @@
|
||||
# Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to make participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all project spaces, and it also applies when
|
||||
an individual is representing the project or its community in public spaces.
|
||||
Examples of representing a project or community include using an official
|
||||
project e-mail address, posting via an official social media account, or acting
|
||||
as an appointed representative at an online or offline event. Representation of
|
||||
a project may be further defined and clarified by project maintainers.
|
||||
|
||||
This Code of Conduct also applies outside the project spaces when there is a
|
||||
reasonable belief that an individual's behavior may have a negative impact on
|
||||
the project or its community.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at <opensource-conduct@fb.com>. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
@@ -0,0 +1,31 @@
|
||||
# Contributing to segment-anything
|
||||
We want to make contributing to this project as easy and transparent as
|
||||
possible.
|
||||
|
||||
## Pull Requests
|
||||
We actively welcome your pull requests.
|
||||
|
||||
1. Fork the repo and create your branch from `main`.
|
||||
2. If you've added code that should be tested, add tests.
|
||||
3. If you've changed APIs, update the documentation.
|
||||
4. Ensure the test suite passes.
|
||||
5. Make sure your code lints, using the `linter.sh` script in the project's root directory. Linting requires `black==23.*`, `isort==5.12.0`, `flake8`, and `mypy`.
|
||||
6. If you haven't already, complete the Contributor License Agreement ("CLA").
|
||||
|
||||
## Contributor License Agreement ("CLA")
|
||||
In order to accept your pull request, we need you to submit a CLA. You only need
|
||||
to do this once to work on any of Facebook's open source projects.
|
||||
|
||||
Complete your CLA here: <https://code.facebook.com/cla>
|
||||
|
||||
## Issues
|
||||
We use GitHub issues to track public bugs. Please ensure your description is
|
||||
clear and has sufficient instructions to be able to reproduce the issue.
|
||||
|
||||
Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
|
||||
disclosure of security bugs. In those cases, please go through the process
|
||||
outlined on that page and do not file a public issue.
|
||||
|
||||
## License
|
||||
By contributing to segment-anything, you agree that your contributions will be licensed
|
||||
under the LICENSE file in the root directory of this source tree.
|
||||
201
custom_nodes/was-node-suite-comfyui/repos/SAM/LICENSE
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
171
custom_nodes/was-node-suite-comfyui/repos/SAM/README.md
Normal file
@@ -0,0 +1,171 @@
|
||||
# Segment Anything
|
||||
|
||||
**[Meta AI Research, FAIR](https://ai.facebook.com/research/)**
|
||||
|
||||
[Alexander Kirillov](https://alexander-kirillov.github.io/), [Eric Mintun](https://ericmintun.github.io/), [Nikhila Ravi](https://nikhilaravi.com/), [Hanzi Mao](https://hanzimao.me/), Chloe Rolland, Laura Gustafson, [Tete Xiao](https://tetexiao.com), [Spencer Whitehead](https://www.spencerwhitehead.com/), Alex Berg, Wan-Yen Lo, [Piotr Dollar](https://pdollar.github.io/), [Ross Girshick](https://www.rossgirshick.info/)
|
||||
|
||||
[[`Paper`](https://ai.facebook.com/research/publications/segment-anything/)] [[`Project`](https://segment-anything.com/)] [[`Demo`](https://segment-anything.com/demo)] [[`Dataset`](https://segment-anything.com/dataset/index.html)] [[`Blog`](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/)] [[`BibTeX`](#citing-segment-anything)]
|
||||
|
||||

|
||||
|
||||
The **Segment Anything Model (SAM)** produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. It has been trained on a [dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1 billion masks, and has strong zero-shot performance on a variety of segmentation tasks.
|
||||
|
||||
<p float="left">
|
||||
<img src="assets/masks1.png?raw=true" width="37.25%" />
|
||||
<img src="assets/masks2.jpg?raw=true" width="61.5%" />
|
||||
</p>
|
||||
|
||||
## Installation
|
||||
|
||||
The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended.
|
||||
|
||||
Install Segment Anything:
|
||||
|
||||
```
|
||||
pip install git+https://github.com/facebookresearch/segment-anything.git
|
||||
```
|
||||
|
||||
or clone the repository locally and install with
|
||||
|
||||
```
|
||||
git clone git@github.com:facebookresearch/segment-anything.git
|
||||
cd segment-anything; pip install -e .
|
||||
```
|
||||
|
||||
The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks.
|
||||
|
||||
```
|
||||
pip install opencv-python pycocotools matplotlib onnxruntime onnx
|
||||
```
|
||||
|
||||
## <a name="GettingStarted"></a>Getting Started
|
||||
|
||||
First download a [model checkpoint](#model-checkpoints). Then the model can be used in just a few lines to get masks from a given prompt:
|
||||
|
||||
```
|
||||
from segment_anything import SamPredictor, sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
predictor = SamPredictor(sam)
|
||||
predictor.set_image(<your_image>)
|
||||
masks, _, _ = predictor.predict(<input_prompts>)
|
||||
```
|
||||
|
||||
or generate masks for an entire image:
|
||||
|
||||
```
|
||||
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
mask_generator = SamAutomaticMaskGenerator(sam)
|
||||
masks = mask_generator.generate(<your_image>)
|
||||
```
|
||||
|
||||
Additionally, masks can be generated for images from the command line:
|
||||
|
||||
```
|
||||
python scripts/amg.py --checkpoint <path/to/checkpoint> --model-type <model_type> --input <image_or_folder> --output <path/to/output>
|
||||
```
|
||||
|
||||
See the examples notebooks on [using SAM with prompts](/notebooks/predictor_example.ipynb) and [automatically generating masks](/notebooks/automatic_mask_generator_example.ipynb) for more details.
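
As a more concrete version of the prompt-based snippet above, the sketch below segments whatever sits under a single foreground click. The checkpoint path, image path, and click coordinates are illustrative placeholders, not values from this repository.

```python
import cv2
import numpy as np
from segment_anything import SamPredictor, sam_model_registry

# Illustrative checkpoint path and model type; any of the three checkpoints works.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

# SamPredictor expects an RGB uint8 HxWxC image.
image = cv2.cvtColor(cv2.imread("truck.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

masks, scores, logits = predictor.predict(
    point_coords=np.array([[500, 375]]),  # one (x, y) click in image pixels (illustrative)
    point_labels=np.array([1]),           # 1 = foreground, 0 = background
    multimask_output=True,                # return three candidate masks
)
best_mask = masks[np.argmax(scores)]      # keep the highest-scoring mask
```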
|
||||
|
||||
<p float="left">
|
||||
<img src="assets/notebook1.png?raw=true" width="49.1%" />
|
||||
<img src="assets/notebook2.png?raw=true" width="48.9%" />
|
||||
</p>
|
||||
|
||||
## ONNX Export
|
||||
|
||||
SAM's lightweight mask decoder can be exported to ONNX format so that it can be run in any environment that supports ONNX runtime, such as in-browser as showcased in the [demo](https://segment-anything.com/demo). Export the model with
|
||||
|
||||
```
|
||||
python scripts/export_onnx_model.py --checkpoint <path/to/checkpoint> --model-type <model_type> --output <path/to/output>
|
||||
```
|
||||
|
||||
See the [example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb) for details on how to combine image preprocessing via SAM's backbone with mask prediction using the ONNX model. It is recommended to use the latest stable version of PyTorch for ONNX export.
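
For orientation, here is a rough, hedged sketch of driving the exported decoder with `onnxruntime` outside the browser. The file names are placeholders, and the input names assume the defaults of `scripts/export_onnx_model.py`; consult the example notebook for the authoritative walkthrough, including transforming click coordinates into the resized-image frame.

```python
import numpy as np
import onnxruntime

# "sam_onnx.onnx" and "embedding.npy" are illustrative; the embedding comes from
# SamPredictor.get_image_embedding() for the image you want to segment.
session = onnxruntime.InferenceSession("sam_onnx.onnx")
image_embedding = np.load("embedding.npy")           # shape (1, 256, 64, 64)

# One foreground click plus the padding point the exporter expects (label -1).
point_coords = np.array([[[512.0, 384.0], [0.0, 0.0]]], dtype=np.float32)
point_labels = np.array([[1, -1]], dtype=np.float32)

outputs = session.run(None, {
    "image_embeddings": image_embedding,
    "point_coords": point_coords,                     # must already be in the model's input frame
    "point_labels": point_labels,
    "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
    "has_mask_input": np.zeros(1, dtype=np.float32),
    "orig_im_size": np.array([1200.0, 1800.0], dtype=np.float32),  # original (H, W), illustrative
})
masks = outputs[0]
binary_mask = masks[0, 0] > 0.0                       # threshold the mask logits
```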
|
||||
|
||||
### Web demo
|
||||
|
||||
The `demo/` folder has a simple one page React app which shows how to run mask prediction with the exported ONNX model in a web browser with multithreading. Please see [`demo/README.md`](https://github.com/facebookresearch/segment-anything/blob/main/demo/README.md) for more details.
|
||||
|
||||
## <a name="Models"></a>Model Checkpoints
|
||||
|
||||
Three versions of the model are available with different backbone sizes. These models can be instantiated by running
|
||||
|
||||
```
|
||||
from segment_anything import sam_model_registry
|
||||
sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
|
||||
```
|
||||
|
||||
Click the links below to download the checkpoint for the corresponding model type.
|
||||
|
||||
- **`default` or `vit_h`: [ViT-H SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)**
|
||||
- `vit_l`: [ViT-L SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth)
|
||||
- `vit_b`: [ViT-B SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth)
|
||||
|
||||
## Dataset
|
||||
|
||||
See [here](https://ai.facebook.com/datasets/segment-anything/) for an overview of the dataset. The dataset can be downloaded [here](https://ai.facebook.com/datasets/segment-anything-downloads/). By downloading the dataset you agree that you have read and accepted the terms of the SA-1B Dataset Research License.
|
||||
|
||||
Masks are saved per image as a JSON file, which can be loaded as a Python dictionary in the format below.
|
||||
|
||||
```python
|
||||
{
|
||||
"image" : image_info,
|
||||
"annotations" : [annotation],
|
||||
}
|
||||
|
||||
image_info {
|
||||
"image_id" : int, # Image id
|
||||
"width" : int, # Image width
|
||||
"height" : int, # Image height
|
||||
"file_name" : str, # Image filename
|
||||
}
|
||||
|
||||
annotation {
|
||||
"id" : int, # Annotation id
|
||||
"segmentation" : dict, # Mask saved in COCO RLE format.
|
||||
"bbox" : [x, y, w, h], # The box around the mask, in XYWH format
|
||||
"area" : int, # The area in pixels of the mask
|
||||
"predicted_iou" : float, # The model's own prediction of the mask's quality
|
||||
"stability_score" : float, # A measure of the mask's quality
|
||||
"crop_box" : [x, y, w, h], # The crop of the image used to generate the mask, in XYWH format
|
||||
"point_coords" : [[x, y]], # The point coordinates input to the model to generate the mask
|
||||
}
|
||||
```
|
||||
|
||||
Image ids can be found in `sa_images_ids.txt`, which can also be downloaded using the above [link](https://ai.facebook.com/datasets/segment-anything-downloads/).
|
||||
|
||||
To decode a mask in COCO RLE format into binary:
|
||||
|
||||
```
|
||||
from pycocotools import mask as mask_utils
|
||||
mask = mask_utils.decode(annotation["segmentation"])
|
||||
```
|
||||
|
||||
See [here](https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py) for more instructions to manipulate masks stored in RLE format.
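
Putting the pieces above together, a per-image annotation file can be loaded and all of its masks decoded roughly as follows (a sketch; the filename is a placeholder).

```python
# Sketch: load one per-image annotation file and decode every mask.
import json
import numpy as np
from pycocotools import mask as mask_utils

with open("sa_000000.json") as f:  # placeholder filename
    data = json.load(f)

info = data["image"]
print(info["file_name"], info["width"], info["height"])

# Decode each COCO RLE segmentation into a binary (H, W) array.
masks = np.stack(
    [mask_utils.decode(ann["segmentation"]) for ann in data["annotations"]]
)
print(masks.shape)  # (num_masks, H, W)
```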
|
||||
|
||||
## License
|
||||
|
||||
The model is licensed under the [Apache 2.0 license](LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
|
||||
|
||||
## Contributors
|
||||
|
||||
The Segment Anything project was made possible with the help of many contributors (alphabetical):
|
||||
|
||||
Aaron Adcock, Vaibhav Aggarwal, Morteza Behrooz, Cheng-Yang Fu, Ashley Gabriel, Ahuva Goldstand, Allen Goodman, Sumanth Gurram, Jiabo Hu, Somya Jain, Devansh Kukreja, Robert Kuo, Joshua Lane, Yanghao Li, Lilian Luong, Jitendra Malik, Mallika Malhotra, William Ngan, Omkar Parkhi, Nikhil Raina, Dirk Rowe, Neil Sejoor, Vanessa Stark, Bala Varadarajan, Bram Wasti, Zachary Winstrom
|
||||
|
||||
## Citing Segment Anything
|
||||
|
||||
If you use SAM or SA-1B in your research, please use the following BibTeX entry.
|
||||
|
||||
```
|
||||
@article{kirillov2023segany,
|
||||
title={Segment Anything},
|
||||
author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross},
|
||||
journal={arXiv:2304.02643},
|
||||
year={2023}
|
||||
}
|
||||
```
|
||||
BIN
custom_nodes/was-node-suite-comfyui/repos/SAM/assets/masks1.png
Normal file
|
After Width: | Height: | Size: 3.5 MiB |
BIN
custom_nodes/was-node-suite-comfyui/repos/SAM/assets/masks2.jpg
Normal file
|
After Width: | Height: | Size: 130 KiB |
|
After Width: | Height: | Size: 1.9 MiB |
|
After Width: | Height: | Size: 568 KiB |
|
After Width: | Height: | Size: 854 KiB |
|
After Width: | Height: | Size: 1.2 MiB |
126
custom_nodes/was-node-suite-comfyui/repos/SAM/demo/README.md
Normal file
@@ -0,0 +1,126 @@
|
||||
## Segment Anything Simple Web demo
|
||||
|
||||
This **front-end only** React-based web demo shows how to load a fixed image and a corresponding `.npy` file of the SAM image embedding, and run the SAM ONNX model in the browser using WebAssembly with multithreading enabled by `SharedArrayBuffer`, Web Worker, and SIMD128.
|
||||
|
||||
<img src="https://github.com/facebookresearch/segment-anything/raw/main/assets/minidemo.gif" width="500"/>
|
||||
|
||||
## Run the app
|
||||
|
||||
Install Yarn
|
||||
|
||||
```
|
||||
npm install -g yarn
|
||||
```
|
||||
|
||||
Build and run:
|
||||
|
||||
```
|
||||
yarn && yarn start
|
||||
```
|
||||
|
||||
Navigate to [`http://localhost:8081/`](http://localhost:8081/)
|
||||
|
||||
Move your cursor around to see the mask prediction update in real time.
|
||||
|
||||
## Export the image embedding
|
||||
|
||||
In the [ONNX Model Example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb), upload an image of your choice, then generate and save the corresponding embedding.
|
||||
|
||||
Initialize the predictor:
|
||||
|
||||
```python
|
||||
checkpoint = "sam_vit_h_4b8939.pth"
|
||||
model_type = "vit_h"
|
||||
sam = sam_model_registry[model_type](checkpoint=checkpoint)
|
||||
sam.to(device='cuda')
|
||||
predictor = SamPredictor(sam)
|
||||
```
|
||||
|
||||
Set the new image and export the embedding:
|
||||
|
||||
```
|
||||
image = cv2.imread('src/assets/dogs.jpg')
|
||||
predictor.set_image(image)
|
||||
image_embedding = predictor.get_image_embedding().cpu().numpy()
|
||||
np.save("dogs_embedding.npy", image_embedding)
|
||||
```
|
||||
|
||||
Save the new image and embedding in `src/assets/data`.
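
For completeness, the two snippets above assume the standard imports from the main README plus NumPy and OpenCV, roughly:

```python
# Assumed imports for the embedding-export snippets above (a sketch).
import cv2
import numpy as np
from segment_anything import sam_model_registry, SamPredictor
```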
|
||||
|
||||
## Export the ONNX model
|
||||
|
||||
You also need to export the quantized ONNX model from the [ONNX Model Example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb).
|
||||
|
||||
Run the cell in the notebook which saves the `sam_onnx_quantized_example.onnx` file, then download it and copy it to the path `/model/sam_onnx_quantized_example.onnx`.
|
||||
|
||||
Here is a snippet of the export/quantization code:
|
||||
|
||||
```
|
||||
onnx_model_path = "sam_onnx_example.onnx"
|
||||
onnx_model_quantized_path = "sam_onnx_quantized_example.onnx"
|
||||
quantize_dynamic(
|
||||
model_input=onnx_model_path,
|
||||
model_output=onnx_model_quantized_path,
|
||||
optimize_model=True,
|
||||
per_channel=False,
|
||||
reduce_range=False,
|
||||
weight_type=QuantType.QUInt8,
|
||||
)
|
||||
```
|
||||
|
||||
**NOTE: if you change the ONNX model by using a new checkpoint you need to also re-export the embedding.**
|
||||
|
||||
## Update the image, embedding, model in the app
|
||||
|
||||
Update the following file paths at the top of `App.tsx`:
|
||||
|
||||
```py
|
||||
const IMAGE_PATH = "/assets/data/dogs.jpg";
|
||||
const IMAGE_EMBEDDING = "/assets/data/dogs_embedding.npy";
|
||||
const MODEL_DIR = "/model/sam_onnx_quantized_example.onnx";
|
||||
```
|
||||
|
||||
## ONNX multithreading with SharedArrayBuffer
|
||||
|
||||
To use multithreading, the appropriate headers need to be set to create a cross-origin isolated state, which enables use of `SharedArrayBuffer` (see this [blog post](https://cloudblogs.microsoft.com/opensource/2021/09/02/onnx-runtime-web-running-your-machine-learning-model-in-browser/) for more details).
|
||||
|
||||
The headers below are set in `configs/webpack/dev.js`:
|
||||
|
||||
```js
|
||||
headers: {
|
||||
"Cross-Origin-Opener-Policy": "same-origin",
|
||||
"Cross-Origin-Embedder-Policy": "credentialless",
|
||||
}
|
||||
```
|
||||
|
||||
## Structure of the app
|
||||
|
||||
**`App.tsx`**
|
||||
|
||||
- Initializes ONNX model
|
||||
- Loads image embedding and image
|
||||
- Runs the ONNX model based on input prompts
|
||||
|
||||
**`Stage.tsx`**
|
||||
|
||||
- Handles mouse move interaction to update the ONNX model prompt
|
||||
|
||||
**`Tool.tsx`**
|
||||
|
||||
- Renders the image and the mask prediction
|
||||
|
||||
**`helpers/maskUtils.tsx`**
|
||||
|
||||
- Conversion of ONNX model output from array to an HTMLImageElement
|
||||
|
||||
**`helpers/onnxModelAPI.tsx`**
|
||||
|
||||
- Formats the inputs for the ONNX model
|
||||
|
||||
**`helpers/scaleHelper.tsx`**
|
||||
|
||||
- Handles image scaling logic for SAM (longest side 1024)
|
||||
|
||||
**`hooks/`**
|
||||
|
||||
- Handle shared state for the app
|
||||
@@ -0,0 +1,84 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
const { resolve } = require("path");
|
||||
const HtmlWebpackPlugin = require("html-webpack-plugin");
|
||||
const FriendlyErrorsWebpackPlugin = require("friendly-errors-webpack-plugin");
|
||||
const CopyPlugin = require("copy-webpack-plugin");
|
||||
const webpack = require("webpack");
|
||||
|
||||
module.exports = {
|
||||
entry: "./src/index.tsx",
|
||||
resolve: {
|
||||
extensions: [".js", ".jsx", ".ts", ".tsx"],
|
||||
},
|
||||
output: {
|
||||
path: resolve(__dirname, "dist"),
|
||||
},
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.mjs$/,
|
||||
include: /node_modules/,
|
||||
type: "javascript/auto",
|
||||
resolve: {
|
||||
fullySpecified: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
test: [/\.jsx?$/, /\.tsx?$/],
|
||||
use: ["ts-loader"],
|
||||
exclude: /node_modules/,
|
||||
},
|
||||
{
|
||||
test: /\.css$/,
|
||||
use: ["style-loader", "css-loader"],
|
||||
},
|
||||
{
|
||||
test: /\.(scss|sass)$/,
|
||||
use: ["style-loader", "css-loader", "postcss-loader"],
|
||||
},
|
||||
{
|
||||
test: /\.(jpe?g|png|gif|svg)$/i,
|
||||
use: [
|
||||
"file-loader?hash=sha512&digest=hex&name=img/[contenthash].[ext]",
|
||||
"image-webpack-loader?bypassOnDebug&optipng.optimizationLevel=7&gifsicle.interlaced=false",
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /\.(woff|woff2|ttf)$/,
|
||||
use: {
|
||||
loader: "url-loader",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
plugins: [
|
||||
new CopyPlugin({
|
||||
patterns: [
|
||||
{
|
||||
from: "node_modules/onnxruntime-web/dist/*.wasm",
|
||||
to: "[name][ext]",
|
||||
},
|
||||
{
|
||||
from: "model",
|
||||
to: "model",
|
||||
},
|
||||
{
|
||||
from: "src/assets",
|
||||
to: "assets",
|
||||
},
|
||||
],
|
||||
}),
|
||||
new HtmlWebpackPlugin({
|
||||
template: "./src/assets/index.html",
|
||||
}),
|
||||
new FriendlyErrorsWebpackPlugin(),
|
||||
new webpack.ProvidePlugin({
|
||||
process: "process/browser",
|
||||
}),
|
||||
],
|
||||
};
|
||||
@@ -0,0 +1,25 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
// development config
|
||||
const { merge } = require("webpack-merge");
|
||||
const commonConfig = require("./common");
|
||||
|
||||
module.exports = merge(commonConfig, {
|
||||
mode: "development",
|
||||
devServer: {
|
||||
hot: true, // enable HMR on the server
|
||||
open: true,
|
||||
// These headers enable the cross origin isolation state
|
||||
// needed to enable use of SharedArrayBuffer for ONNX
|
||||
// multithreading.
|
||||
headers: {
|
||||
"Cross-Origin-Opener-Policy": "same-origin",
|
||||
"Cross-Origin-Embedder-Policy": "credentialless",
|
||||
},
|
||||
},
|
||||
devtool: "cheap-module-source-map",
|
||||
});
|
||||
@@ -0,0 +1,22 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
// production config
|
||||
const { merge } = require("webpack-merge");
|
||||
const { resolve } = require("path");
|
||||
const Dotenv = require("dotenv-webpack");
|
||||
const commonConfig = require("./common");
|
||||
|
||||
module.exports = merge(commonConfig, {
|
||||
mode: "production",
|
||||
output: {
|
||||
filename: "js/bundle.[contenthash].min.js",
|
||||
path: resolve(__dirname, "../../dist"),
|
||||
publicPath: "/",
|
||||
},
|
||||
devtool: "source-map",
|
||||
plugins: [new Dotenv()],
|
||||
});
|
||||
@@ -0,0 +1,62 @@
|
||||
{
|
||||
"name": "segment-anything-mini-demo",
|
||||
"version": "0.1.0",
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
"build": "yarn run clean-dist && webpack --config=configs/webpack/prod.js && mv dist/*.wasm dist/js",
|
||||
"clean-dist": "rimraf dist/*",
|
||||
"lint": "eslint './src/**/*.{js,ts,tsx}' --quiet",
|
||||
"start": "yarn run start-dev",
|
||||
"test": "yarn run start-model-test",
|
||||
"start-dev": "webpack serve --config=configs/webpack/dev.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/core": "^7.18.13",
|
||||
"@babel/preset-env": "^7.18.10",
|
||||
"@babel/preset-react": "^7.18.6",
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"@pmmmwh/react-refresh-webpack-plugin": "^0.5.7",
|
||||
"@testing-library/react": "^13.3.0",
|
||||
"@types/node": "^18.7.13",
|
||||
"@types/react": "^18.0.17",
|
||||
"@types/react-dom": "^18.0.6",
|
||||
"@types/underscore": "^1.11.4",
|
||||
"@typescript-eslint/eslint-plugin": "^5.35.1",
|
||||
"@typescript-eslint/parser": "^5.35.1",
|
||||
"babel-loader": "^8.2.5",
|
||||
"copy-webpack-plugin": "^11.0.0",
|
||||
"css-loader": "^6.7.1",
|
||||
"dotenv": "^16.0.2",
|
||||
"dotenv-webpack": "^8.0.1",
|
||||
"eslint": "^8.22.0",
|
||||
"eslint-plugin-react": "^7.31.0",
|
||||
"file-loader": "^6.2.0",
|
||||
"fork-ts-checker-webpack-plugin": "^7.2.13",
|
||||
"friendly-errors-webpack-plugin": "^1.7.0",
|
||||
"html-webpack-plugin": "^5.5.0",
|
||||
"image-webpack-loader": "^8.1.0",
|
||||
"postcss-loader": "^7.0.1",
|
||||
"postcss-preset-env": "^7.8.0",
|
||||
"process": "^0.11.10",
|
||||
"rimraf": "^3.0.2",
|
||||
"sass": "^1.54.5",
|
||||
"sass-loader": "^13.0.2",
|
||||
"style-loader": "^3.3.1",
|
||||
"tailwindcss": "^3.1.8",
|
||||
"ts-loader": "^9.3.1",
|
||||
"typescript": "^4.8.2",
|
||||
"webpack": "^5.74.0",
|
||||
"webpack-cli": "^4.10.0",
|
||||
"webpack-dev-server": "^4.10.0",
|
||||
"webpack-dotenv-plugin": "^2.1.0",
|
||||
"webpack-merge": "^5.8.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"npyjs": "^0.4.0",
|
||||
"onnxruntime-web": "^1.14.0",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"underscore": "^1.13.6",
|
||||
"react-refresh": "^0.14.0"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
const tailwindcss = require("tailwindcss");
|
||||
module.exports = {
|
||||
plugins: ["postcss-preset-env", 'tailwindcss/nesting', tailwindcss],
|
||||
};
|
||||
130
custom_nodes/was-node-suite-comfyui/repos/SAM/demo/src/App.tsx
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import { InferenceSession, Tensor } from "onnxruntime-web";
|
||||
import React, { useContext, useEffect, useState } from "react";
|
||||
import "./assets/scss/App.scss";
|
||||
import { handleImageScale } from "./components/helpers/scaleHelper";
|
||||
import { modelScaleProps } from "./components/helpers/Interfaces";
|
||||
import { onnxMaskToImage } from "./components/helpers/maskUtils";
|
||||
import { modelData } from "./components/helpers/onnxModelAPI";
|
||||
import Stage from "./components/Stage";
|
||||
import AppContext from "./components/hooks/createContext";
|
||||
const ort = require("onnxruntime-web");
|
||||
/* @ts-ignore */
|
||||
import npyjs from "npyjs";
|
||||
|
||||
// Define image, embedding and model paths
|
||||
const IMAGE_PATH = "/assets/data/dogs.jpg";
|
||||
const IMAGE_EMBEDDING = "/assets/data/dogs_embedding.npy";
|
||||
const MODEL_DIR = "/model/sam_onnx_quantized_example.onnx";
|
||||
|
||||
const App = () => {
|
||||
const {
|
||||
clicks: [clicks],
|
||||
image: [, setImage],
|
||||
maskImg: [, setMaskImg],
|
||||
} = useContext(AppContext)!;
|
||||
const [model, setModel] = useState<InferenceSession | null>(null); // ONNX model
|
||||
const [tensor, setTensor] = useState<Tensor | null>(null); // Image embedding tensor
|
||||
|
||||
// The ONNX model expects the input to be rescaled to 1024.
|
||||
// The modelScale state variable keeps track of the scale values.
|
||||
const [modelScale, setModelScale] = useState<modelScaleProps | null>(null);
|
||||
|
||||
// Initialize the ONNX model, load the image, and load the SAM
|
||||
// pre-computed image embedding
|
||||
useEffect(() => {
|
||||
// Initialize the ONNX model
|
||||
const initModel = async () => {
|
||||
try {
|
||||
if (MODEL_DIR === undefined) return;
|
||||
const URL: string = MODEL_DIR;
|
||||
const model = await InferenceSession.create(URL);
|
||||
setModel(model);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
};
|
||||
initModel();
|
||||
|
||||
// Load the image
|
||||
const url = new URL(IMAGE_PATH, location.origin);
|
||||
loadImage(url);
|
||||
|
||||
// Load the Segment Anything pre-computed embedding
|
||||
Promise.resolve(loadNpyTensor(IMAGE_EMBEDDING, "float32")).then(
|
||||
(embedding) => setTensor(embedding)
|
||||
);
|
||||
}, []);
|
||||
|
||||
const loadImage = async (url: URL) => {
|
||||
try {
|
||||
const img = new Image();
|
||||
img.src = url.href;
|
||||
img.onload = () => {
|
||||
const { height, width, samScale } = handleImageScale(img);
|
||||
setModelScale({
|
||||
height: height, // original image height
|
||||
width: width, // original image width
|
||||
samScale: samScale, // scaling factor for image which has been resized to longest side 1024
|
||||
});
|
||||
img.width = width;
|
||||
img.height = height;
|
||||
setImage(img);
|
||||
};
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
}
|
||||
};
|
||||
|
||||
// Decode a Numpy file into a tensor.
|
||||
const loadNpyTensor = async (tensorFile: string, dType: string) => {
|
||||
let npLoader = new npyjs();
|
||||
const npArray = await npLoader.load(tensorFile);
|
||||
const tensor = new ort.Tensor(dType, npArray.data, npArray.shape);
|
||||
return tensor;
|
||||
};
|
||||
|
||||
// Run the ONNX model every time clicks has changed
|
||||
useEffect(() => {
|
||||
runONNX();
|
||||
}, [clicks]);
|
||||
|
||||
const runONNX = async () => {
|
||||
try {
|
||||
if (
|
||||
model === null ||
|
||||
clicks === null ||
|
||||
tensor === null ||
|
||||
modelScale === null
|
||||
)
|
||||
return;
|
||||
else {
|
||||
// Prepare the model input in the correct format for SAM.
|
||||
// The modelData function is from onnxModelAPI.tsx.
|
||||
const feeds = modelData({
|
||||
clicks,
|
||||
tensor,
|
||||
modelScale,
|
||||
});
|
||||
if (feeds === undefined) return;
|
||||
// Run the SAM ONNX model with the feeds returned from modelData()
|
||||
const results = await model.run(feeds);
|
||||
const output = results[model.outputNames[0]];
|
||||
// The predicted mask returned from the ONNX model is an array which is
|
||||
// rendered as an HTML image using onnxMaskToImage() from maskUtils.tsx.
|
||||
setMaskImg(onnxMaskToImage(output.data, output.dims[2], output.dims[3]));
|
||||
}
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
};
|
||||
|
||||
return <Stage />;
|
||||
};
|
||||
|
||||
export default App;
|
||||
|
After Width: | Height: | Size: 438 KiB |
@@ -0,0 +1,18 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" dir="ltr" prefix="og: https://ogp.me/ns#" class="w-full h-full">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta
|
||||
name="viewport"
|
||||
content="width=device-width, initial-scale=1, shrink-to-fit=no"
|
||||
/>
|
||||
<title>Segment Anything Demo</title>
|
||||
|
||||
<!-- Meta Tags -->
|
||||
<meta property="og:type" content="website" />
|
||||
<meta property="og:title" content="Segment Anything Demo" />
|
||||
</head>
|
||||
<body class="w-full h-full">
|
||||
<div id="root" class="w-full h-full"></div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -0,0 +1,3 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
@@ -0,0 +1,49 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import React, { useContext } from "react";
|
||||
import * as _ from "underscore";
|
||||
import Tool from "./Tool";
|
||||
import { modelInputProps } from "./helpers/Interfaces";
|
||||
import AppContext from "./hooks/createContext";
|
||||
|
||||
const Stage = () => {
|
||||
const {
|
||||
clicks: [, setClicks],
|
||||
image: [image],
|
||||
} = useContext(AppContext)!;
|
||||
|
||||
const getClick = (x: number, y: number): modelInputProps => {
|
||||
const clickType = 1;
|
||||
return { x, y, clickType };
|
||||
};
|
||||
|
||||
// Get mouse position and scale the (x, y) coordinates back to the natural
|
||||
// scale of the image. Update the state of clicks with setClicks to trigger
|
||||
// the ONNX model to run and generate a new mask via a useEffect in App.tsx
|
||||
const handleMouseMove = _.throttle((e: any) => {
|
||||
let el = e.nativeEvent.target;
|
||||
const rect = el.getBoundingClientRect();
|
||||
let x = e.clientX - rect.left;
|
||||
let y = e.clientY - rect.top;
|
||||
const imageScale = image ? image.width / el.offsetWidth : 1;
|
||||
x *= imageScale;
|
||||
y *= imageScale;
|
||||
const click = getClick(x, y);
|
||||
if (click) setClicks([click]);
|
||||
}, 15);
|
||||
|
||||
const flexCenterClasses = "flex items-center justify-center";
|
||||
return (
|
||||
<div className={`${flexCenterClasses} w-full h-full`}>
|
||||
<div className={`${flexCenterClasses} relative w-[90%] h-[90%]`}>
|
||||
<Tool handleMouseMove={handleMouseMove} />
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Stage;
|
||||
@@ -0,0 +1,73 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import React, { useContext, useEffect, useState } from "react";
|
||||
import AppContext from "./hooks/createContext";
|
||||
import { ToolProps } from "./helpers/Interfaces";
|
||||
import * as _ from "underscore";
|
||||
|
||||
const Tool = ({ handleMouseMove }: ToolProps) => {
|
||||
const {
|
||||
image: [image],
|
||||
maskImg: [maskImg, setMaskImg],
|
||||
} = useContext(AppContext)!;
|
||||
|
||||
// Determine if we should shrink or grow the images to match the
|
||||
// width or the height of the page and setup a ResizeObserver to
|
||||
// monitor changes in the size of the page
|
||||
const [shouldFitToWidth, setShouldFitToWidth] = useState(true);
|
||||
const bodyEl = document.body;
|
||||
const fitToPage = () => {
|
||||
if (!image) return;
|
||||
const imageAspectRatio = image.width / image.height;
|
||||
const screenAspectRatio = window.innerWidth / window.innerHeight;
|
||||
setShouldFitToWidth(imageAspectRatio > screenAspectRatio);
|
||||
};
|
||||
const resizeObserver = new ResizeObserver((entries) => {
|
||||
for (const entry of entries) {
|
||||
if (entry.target === bodyEl) {
|
||||
fitToPage();
|
||||
}
|
||||
}
|
||||
});
|
||||
useEffect(() => {
|
||||
fitToPage();
|
||||
resizeObserver.observe(bodyEl);
|
||||
return () => {
|
||||
resizeObserver.unobserve(bodyEl);
|
||||
};
|
||||
}, [image]);
|
||||
|
||||
const imageClasses = "";
|
||||
const maskImageClasses = `absolute opacity-40 pointer-events-none`;
|
||||
|
||||
// Render the image and the predicted mask image on top
|
||||
return (
|
||||
<>
|
||||
{image && (
|
||||
<img
|
||||
onMouseMove={handleMouseMove}
|
||||
onMouseOut={() => _.defer(() => setMaskImg(null))}
|
||||
onTouchStart={handleMouseMove}
|
||||
src={image.src}
|
||||
className={`${
|
||||
shouldFitToWidth ? "w-full" : "h-full"
|
||||
} ${imageClasses}`}
|
||||
></img>
|
||||
)}
|
||||
{maskImg && (
|
||||
<img
|
||||
src={maskImg.src}
|
||||
className={`${
|
||||
shouldFitToWidth ? "w-full" : "h-full"
|
||||
} ${maskImageClasses}`}
|
||||
></img>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default Tool;
|
||||
@@ -0,0 +1,29 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import { Tensor } from "onnxruntime-web";
|
||||
|
||||
export interface modelScaleProps {
|
||||
samScale: number;
|
||||
height: number;
|
||||
width: number;
|
||||
}
|
||||
|
||||
export interface modelInputProps {
|
||||
x: number;
|
||||
y: number;
|
||||
clickType: number;
|
||||
}
|
||||
|
||||
export interface modeDataProps {
|
||||
clicks?: Array<modelInputProps>;
|
||||
tensor: Tensor;
|
||||
modelScale: modelScaleProps;
|
||||
}
|
||||
|
||||
export interface ToolProps {
|
||||
handleMouseMove: (e: any) => void;
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
// Convert the onnx model mask prediction to ImageData
|
||||
function arrayToImageData(input: any, width: number, height: number) {
|
||||
const [r, g, b, a] = [0, 114, 189, 255]; // the mask's blue color
|
||||
const arr = new Uint8ClampedArray(4 * width * height).fill(0);
|
||||
for (let i = 0; i < input.length; i++) {
|
||||
|
||||
// Threshold the onnx model mask prediction at 0.0
|
||||
// This is equivalent to thresholding the mask using predictor.model.mask_threshold
|
||||
// in python
|
||||
if (input[i] > 0.0) {
|
||||
arr[4 * i + 0] = r;
|
||||
arr[4 * i + 1] = g;
|
||||
arr[4 * i + 2] = b;
|
||||
arr[4 * i + 3] = a;
|
||||
}
|
||||
}
|
||||
return new ImageData(arr, height, width);
|
||||
}
|
||||
|
||||
// Use a Canvas element to produce an image from ImageData
|
||||
function imageDataToImage(imageData: ImageData) {
|
||||
const canvas = imageDataToCanvas(imageData);
|
||||
const image = new Image();
|
||||
image.src = canvas.toDataURL();
|
||||
return image;
|
||||
}
|
||||
|
||||
// Canvas elements can be created from ImageData
|
||||
function imageDataToCanvas(imageData: ImageData) {
|
||||
const canvas = document.createElement("canvas");
|
||||
const ctx = canvas.getContext("2d");
|
||||
canvas.width = imageData.width;
|
||||
canvas.height = imageData.height;
|
||||
ctx?.putImageData(imageData, 0, 0);
|
||||
return canvas;
|
||||
}
|
||||
|
||||
// Convert the onnx model mask output to an HTMLImageElement
|
||||
export function onnxMaskToImage(input: any, width: number, height: number) {
|
||||
return imageDataToImage(arrayToImageData(input, width, height));
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import { Tensor } from "onnxruntime-web";
|
||||
import { modeDataProps } from "./Interfaces";
|
||||
|
||||
const modelData = ({ clicks, tensor, modelScale }: modeDataProps) => {
|
||||
const imageEmbedding = tensor;
|
||||
let pointCoords;
|
||||
let pointLabels;
|
||||
let pointCoordsTensor;
|
||||
let pointLabelsTensor;
|
||||
|
||||
// Check there are input click prompts
|
||||
if (clicks) {
|
||||
let n = clicks.length;
|
||||
|
||||
// If there is no box input, a single padding point with
|
||||
// label -1 and coordinates (0.0, 0.0) should be concatenated
|
||||
// so initialize the array to support (n + 1) points.
|
||||
pointCoords = new Float32Array(2 * (n + 1));
|
||||
pointLabels = new Float32Array(n + 1);
|
||||
|
||||
// Add clicks and scale to what SAM expects
|
||||
for (let i = 0; i < n; i++) {
|
||||
pointCoords[2 * i] = clicks[i].x * modelScale.samScale;
|
||||
pointCoords[2 * i + 1] = clicks[i].y * modelScale.samScale;
|
||||
pointLabels[i] = clicks[i].clickType;
|
||||
}
|
||||
|
||||
// Add in the extra point/label when only clicks and no box
|
||||
// The extra point is at (0, 0) with label -1
|
||||
pointCoords[2 * n] = 0.0;
|
||||
pointCoords[2 * n + 1] = 0.0;
|
||||
pointLabels[n] = -1.0;
|
||||
|
||||
// Create the tensor
|
||||
pointCoordsTensor = new Tensor("float32", pointCoords, [1, n + 1, 2]);
|
||||
pointLabelsTensor = new Tensor("float32", pointLabels, [1, n + 1]);
|
||||
}
|
||||
const imageSizeTensor = new Tensor("float32", [
|
||||
modelScale.height,
|
||||
modelScale.width,
|
||||
]);
|
||||
|
||||
if (pointCoordsTensor === undefined || pointLabelsTensor === undefined)
|
||||
return;
|
||||
|
||||
// There is no previous mask, so default to an empty tensor
|
||||
const maskInput = new Tensor(
|
||||
"float32",
|
||||
new Float32Array(256 * 256),
|
||||
[1, 1, 256, 256]
|
||||
);
|
||||
// There is no previous mask, so default to 0
|
||||
const hasMaskInput = new Tensor("float32", [0]);
|
||||
|
||||
return {
|
||||
image_embeddings: imageEmbedding,
|
||||
point_coords: pointCoordsTensor,
|
||||
point_labels: pointLabelsTensor,
|
||||
orig_im_size: imageSizeTensor,
|
||||
mask_input: maskInput,
|
||||
has_mask_input: hasMaskInput,
|
||||
};
|
||||
};
|
||||
|
||||
export { modelData };
|
||||
@@ -0,0 +1,18 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
|
||||
// Helper function for handling image scaling needed for SAM
|
||||
const handleImageScale = (image: HTMLImageElement) => {
|
||||
// Input images to SAM must be resized so the longest side is 1024
|
||||
const LONG_SIDE_LENGTH = 1024;
|
||||
let w = image.naturalWidth;
|
||||
let h = image.naturalHeight;
|
||||
const samScale = LONG_SIDE_LENGTH / Math.max(h, w);
|
||||
return { height: h, width: w, samScale };
|
||||
};
|
||||
|
||||
export { handleImageScale };
|
||||
@@ -0,0 +1,31 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import React, { useState } from "react";
|
||||
import { modelInputProps } from "../helpers/Interfaces";
|
||||
import AppContext from "./createContext";
|
||||
|
||||
const AppContextProvider = (props: {
|
||||
children: React.ReactElement<any, string | React.JSXElementConstructor<any>>;
|
||||
}) => {
|
||||
const [clicks, setClicks] = useState<Array<modelInputProps> | null>(null);
|
||||
const [image, setImage] = useState<HTMLImageElement | null>(null);
|
||||
const [maskImg, setMaskImg] = useState<HTMLImageElement | null>(null);
|
||||
|
||||
return (
|
||||
<AppContext.Provider
|
||||
value={{
|
||||
clicks: [clicks, setClicks],
|
||||
image: [image, setImage],
|
||||
maskImg: [maskImg, setMaskImg],
|
||||
}}
|
||||
>
|
||||
{props.children}
|
||||
</AppContext.Provider>
|
||||
);
|
||||
};
|
||||
|
||||
export default AppContextProvider;
|
||||
@@ -0,0 +1,27 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import { createContext } from "react";
|
||||
import { modelInputProps } from "../helpers/Interfaces";
|
||||
|
||||
interface contextProps {
|
||||
clicks: [
|
||||
clicks: modelInputProps[] | null,
|
||||
setClicks: (e: modelInputProps[] | null) => void
|
||||
];
|
||||
image: [
|
||||
image: HTMLImageElement | null,
|
||||
setImage: (e: HTMLImageElement | null) => void
|
||||
];
|
||||
maskImg: [
|
||||
maskImg: HTMLImageElement | null,
|
||||
setMaskImg: (e: HTMLImageElement | null) => void
|
||||
];
|
||||
}
|
||||
|
||||
const AppContext = createContext<contextProps | null>(null);
|
||||
|
||||
export default AppContext;
|
||||
@@ -0,0 +1,17 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
import * as React from "react";
|
||||
import { createRoot } from "react-dom/client";
|
||||
import AppContextProvider from "./components/hooks/context";
|
||||
import App from "./App";
|
||||
const container = document.getElementById("root");
|
||||
const root = createRoot(container!);
|
||||
root.render(
|
||||
<AppContextProvider>
|
||||
<App/>
|
||||
</AppContextProvider>
|
||||
);
|
||||
@@ -0,0 +1,12 @@
|
||||
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
// All rights reserved.
|
||||
|
||||
// This source code is licensed under the license found in the
|
||||
// LICENSE file in the root directory of this source tree.
|
||||
|
||||
/** @type {import('tailwindcss').Config} */
|
||||
module.exports = {
|
||||
content: ["./src/**/*.{html,js,tsx}"],
|
||||
theme: {},
|
||||
plugins: [],
|
||||
};
|
||||
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"lib": ["dom", "dom.iterable", "esnext"],
|
||||
"allowJs": true,
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"noEmit": false,
|
||||
"esModuleInterop": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "node",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"jsx": "react",
|
||||
"incremental": true,
|
||||
"target": "ESNext",
|
||||
"useDefineForClassFields": true,
|
||||
"allowSyntheticDefaultImports": true,
|
||||
"outDir": "./dist/",
|
||||
"sourceMap": true
|
||||
},
|
||||
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", "src"],
|
||||
"exclude": ["node_modules"]
|
||||
}
|
||||
32
custom_nodes/was-node-suite-comfyui/repos/SAM/linter.sh
Normal file
@@ -0,0 +1,32 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
|
||||
{
|
||||
black --version | grep -E "23\." > /dev/null
|
||||
} || {
|
||||
echo "Linter requires 'black==23.*' !"
|
||||
exit 1
|
||||
}
|
||||
|
||||
ISORT_VERSION=$(isort --version-number)
|
||||
if [[ "$ISORT_VERSION" != 5.12* ]]; then
|
||||
echo "Linter requires isort==5.12.0 !"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Running isort ..."
|
||||
isort . --atomic
|
||||
|
||||
echo "Running black ..."
|
||||
black -l 100 .
|
||||
|
||||
echo "Running flake8 ..."
|
||||
if [ -x "$(command -v flake8)" ]; then
|
||||
flake8 .
|
||||
else
|
||||
python3 -m flake8 .
|
||||
fi
|
||||
|
||||
echo "Running mypy..."
|
||||
|
||||
mypy --exclude 'setup.py|notebooks' .
|
||||
|
After Width: | Height: | Size: 98 KiB |
|
After Width: | Height: | Size: 164 KiB |
|
After Width: | Height: | Size: 265 KiB |
@@ -0,0 +1,774 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "901c8ef3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1662bb7c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Produces masks from prompts using an ONNX model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7fcc21a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"SAM's prompt encoder and mask decoder are very lightweight, which allows for efficient computation of a mask given user input. This notebook shows an example of how to export and use this lightweight component of the model in ONNX format, allowing it to run on a variety of platforms that support an ONNX runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "86daff77",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
"<a target=\"_blank\" href=\"https://colab.research.google.com/github/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb\">\n",
|
||||
" <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
|
||||
"</a>\n"
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.HTML object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from IPython.display import display, HTML\n",
|
||||
"display(HTML(\n",
|
||||
"\"\"\"\n",
|
||||
"<a target=\"_blank\" href=\"https://colab.research.google.com/github/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb\">\n",
|
||||
" <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
|
||||
"</a>\n",
|
||||
"\"\"\"\n",
|
||||
"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "55ae4e00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Environment Set-up"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "109a5cc2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If running locally using jupyter, first install `segment_anything` in your environment using the [installation instructions](https://github.com/facebookresearch/segment-anything#installation) in the repository. The latest stable versions of PyTorch and ONNX are recommended for this notebook. If running from Google Colab, set `using_colab=True` below and run the cell. In Colab, be sure to select 'GPU' under 'Edit'->'Notebook Settings'->'Hardware accelerator'."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "39b99fc4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"using_colab = False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "296a69be",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if using_colab:\n",
|
||||
" import torch\n",
|
||||
" import torchvision\n",
|
||||
" print(\"PyTorch version:\", torch.__version__)\n",
|
||||
" print(\"Torchvision version:\", torchvision.__version__)\n",
|
||||
" print(\"CUDA is available:\", torch.cuda.is_available())\n",
|
||||
" import sys\n",
|
||||
" !{sys.executable} -m pip install opencv-python matplotlib onnx onnxruntime\n",
|
||||
" !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/segment-anything.git'\n",
|
||||
" \n",
|
||||
" !mkdir images\n",
|
||||
" !wget -P images https://raw.githubusercontent.com/facebookresearch/segment-anything/main/notebooks/images/truck.jpg\n",
|
||||
" \n",
|
||||
" !wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dc4a58be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set-up"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "42396e8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that this notebook requires both the `onnx` and `onnxruntime` optional dependencies, in addition to `opencv-python` and `matplotlib` for visualization."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2c712610",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import numpy as np\n",
|
||||
"import cv2\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from segment_anything import sam_model_registry, SamPredictor\n",
|
||||
"from segment_anything.utils.onnx import SamOnnxModel\n",
|
||||
"\n",
|
||||
"import onnxruntime\n",
|
||||
"from onnxruntime.quantization import QuantType\n",
|
||||
"from onnxruntime.quantization.quantize import quantize_dynamic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f29441b9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def show_mask(mask, ax):\n",
|
||||
" color = np.array([30/255, 144/255, 255/255, 0.6])\n",
|
||||
" h, w = mask.shape[-2:]\n",
|
||||
" mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n",
|
||||
" ax.imshow(mask_image)\n",
|
||||
" \n",
|
||||
"def show_points(coords, labels, ax, marker_size=375):\n",
|
||||
" pos_points = coords[labels==1]\n",
|
||||
" neg_points = coords[labels==0]\n",
|
||||
" ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n",
|
||||
" ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) \n",
|
||||
" \n",
|
||||
"def show_box(box, ax):\n",
|
||||
" x0, y0 = box[0], box[1]\n",
|
||||
" w, h = box[2] - box[0], box[3] - box[1]\n",
|
||||
" ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bd0f6b2b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Export an ONNX model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1540f719",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set the path below to a SAM model checkpoint, then load the model. This will be needed to both export the model and to calculate embeddings for the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76fc53f4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"checkpoint = \"sam_vit_h_4b8939.pth\"\n",
|
||||
"model_type = \"vit_h\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "11bfc8aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sam = sam_model_registry[model_type](checkpoint=checkpoint)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "450c089c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The script `segment-anything/scripts/export_onnx_model.py` can be used to export the necessary portion of SAM. Alternatively, run the following code to export an ONNX model. If you have already exported a model, set the path below and skip to the next section. Assure that the exported ONNX model aligns with the checkpoint and model type set above. This notebook expects the model was exported with the parameter `return_single_mask=True`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "38a8add8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_model_path = None # Set to use an already exported model, then skip to the next section."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7da638ba",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import warnings\n",
|
||||
"\n",
|
||||
"onnx_model_path = \"sam_onnx_example.onnx\"\n",
|
||||
"\n",
|
||||
"onnx_model = SamOnnxModel(sam, return_single_mask=True)\n",
|
||||
"\n",
|
||||
"dynamic_axes = {\n",
|
||||
" \"point_coords\": {1: \"num_points\"},\n",
|
||||
" \"point_labels\": {1: \"num_points\"},\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"embed_dim = sam.prompt_encoder.embed_dim\n",
|
||||
"embed_size = sam.prompt_encoder.image_embedding_size\n",
|
||||
"mask_input_size = [4 * x for x in embed_size]\n",
|
||||
"dummy_inputs = {\n",
|
||||
" \"image_embeddings\": torch.randn(1, embed_dim, *embed_size, dtype=torch.float),\n",
|
||||
" \"point_coords\": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float),\n",
|
||||
" \"point_labels\": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float),\n",
|
||||
" \"mask_input\": torch.randn(1, 1, *mask_input_size, dtype=torch.float),\n",
|
||||
" \"has_mask_input\": torch.tensor([1], dtype=torch.float),\n",
|
||||
" \"orig_im_size\": torch.tensor([1500, 2250], dtype=torch.float),\n",
|
||||
"}\n",
|
||||
"output_names = [\"masks\", \"iou_predictions\", \"low_res_masks\"]\n",
|
||||
"\n",
|
||||
"with warnings.catch_warnings():\n",
|
||||
" warnings.filterwarnings(\"ignore\", category=torch.jit.TracerWarning)\n",
|
||||
" warnings.filterwarnings(\"ignore\", category=UserWarning)\n",
|
||||
" with open(onnx_model_path, \"wb\") as f:\n",
|
||||
" torch.onnx.export(\n",
|
||||
" onnx_model,\n",
|
||||
" tuple(dummy_inputs.values()),\n",
|
||||
" f,\n",
|
||||
" export_params=True,\n",
|
||||
" verbose=False,\n",
|
||||
" opset_version=17,\n",
|
||||
" do_constant_folding=True,\n",
|
||||
" input_names=list(dummy_inputs.keys()),\n",
|
||||
" output_names=output_names,\n",
|
||||
" dynamic_axes=dynamic_axes,\n",
|
||||
" ) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c450cf1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If desired, the model can additionally be quantized and optimized. We find this improves web runtime significantly for negligible change in qualitative performance. Run the next cell to quantize the model, or skip to the next section otherwise."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "235d39fe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_model_quantized_path = \"sam_onnx_quantized_example.onnx\"\n",
|
||||
"quantize_dynamic(\n",
|
||||
" model_input=onnx_model_path,\n",
|
||||
" model_output=onnx_model_quantized_path,\n",
|
||||
" optimize_model=True,\n",
|
||||
" per_channel=False,\n",
|
||||
" reduce_range=False,\n",
|
||||
" weight_type=QuantType.QUInt8,\n",
|
||||
")\n",
|
||||
"onnx_model_path = onnx_model_quantized_path"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "927a928b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6be6eb55",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image = cv2.imread('images/truck.jpg')\n",
|
||||
"image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b7e9a27a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize=(10,10))\n",
|
||||
"plt.imshow(image)\n",
|
||||
"plt.axis('on')\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "027b177b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using an ONNX model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "778d4593",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here as an example, we use `onnxruntime` in python on CPU to execute the ONNX model. However, any platform that supports an ONNX runtime could be used in principle. Launch the runtime session below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9689b1bf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ort_session = onnxruntime.InferenceSession(onnx_model_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7708ead6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use the ONNX model, the image must first be pre-processed using the SAM image encoder. This is a heavier weight process best performed on GPU. SamPredictor can be used as normal, then `.get_image_embedding()` will retreive the intermediate features."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "26e067b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sam.to(device='cuda')\n",
|
||||
"predictor = SamPredictor(sam)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7ad3f0d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"predictor.set_image(image)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8a6f0f07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_embedding = predictor.get_image_embedding().cpu().numpy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5e112f33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_embedding.shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6337b654",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The ONNX model has a different input signature than `SamPredictor.predict`. The following inputs must all be supplied. Note the special cases for both point and mask inputs. All inputs are `np.float32`.\n",
|
||||
"* `image_embeddings`: The image embedding from `predictor.get_image_embedding()`. Has a batch index of length 1.\n",
|
||||
"* `point_coords`: Coordinates of sparse input prompts, corresponding to both point inputs and box inputs. Boxes are encoded using two points, one for the top-left corner and one for the bottom-right corner. *Coordinates must already be transformed to long-side 1024.* Has a batch index of length 1.\n",
|
||||
"* `point_labels`: Labels for the sparse input prompts. 0 is a negative input point, 1 is a positive input point, 2 is a top-left box corner, 3 is a bottom-right box corner, and -1 is a padding point. *If there is no box input, a single padding point with label -1 and coordinates (0.0, 0.0) should be concatenated.*\n",
|
||||
"* `mask_input`: A mask input to the model with shape 1x1x256x256. This must be supplied even if there is no mask input. In this case, it can just be zeros.\n",
|
||||
"* `has_mask_input`: An indicator for the mask input. 1 indicates a mask input, 0 indicates no mask input.\n",
|
||||
"* `orig_im_size`: The size of the input image in (H,W) format, before any transformation. \n",
|
||||
"\n",
|
||||
"Additionally, the ONNX model does not threshold the output mask logits. To obtain a binary mask, threshold at `sam.mask_threshold` (equal to 0.0)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bf5a9f55",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example point input"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1c0deef0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_point = np.array([[500, 375]])\n",
|
||||
"input_label = np.array([1])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7256394c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add a batch index, concatenate a padding point, and transform."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4f69903e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_coord = np.concatenate([input_point, np.array([[0.0, 0.0]])], axis=0)[None, :, :]\n",
|
||||
"onnx_label = np.concatenate([input_label, np.array([-1])], axis=0)[None, :].astype(np.float32)\n",
|
||||
"\n",
|
||||
"onnx_coord = predictor.transform.apply_coords(onnx_coord, image.shape[:2]).astype(np.float32)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b188dc53",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create an empty mask input and an indicator for no mask."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5cb52bcf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32)\n",
|
||||
"onnx_has_mask_input = np.zeros(1, dtype=np.float32)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a99c2cc5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Package the inputs to run in the onnx model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1d7ea11",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ort_inputs = {\n",
|
||||
" \"image_embeddings\": image_embedding,\n",
|
||||
" \"point_coords\": onnx_coord,\n",
|
||||
" \"point_labels\": onnx_label,\n",
|
||||
" \"mask_input\": onnx_mask_input,\n",
|
||||
" \"has_mask_input\": onnx_has_mask_input,\n",
|
||||
" \"orig_im_size\": np.array(image.shape[:2], dtype=np.float32)\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b6409c9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Predict a mask and threshold it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dc4cc082",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"masks, _, low_res_logits = ort_session.run(None, ort_inputs)\n",
|
||||
"masks = masks > predictor.model.mask_threshold"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d778a8fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"masks.shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "badb1175",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize=(10,10))\n",
|
||||
"plt.imshow(image)\n",
|
||||
"show_mask(masks, plt.gca())\n",
|
||||
"show_points(input_point, input_label, plt.gca())\n",
|
||||
"plt.axis('off')\n",
|
||||
"plt.show() "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1f1d4d15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example mask input"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b319da82",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_point = np.array([[500, 375], [1125, 625]])\n",
|
||||
"input_label = np.array([1, 1])\n",
|
||||
"\n",
|
||||
"# Use the mask output from the previous run. It is already in the correct form for input to the ONNX model.\n",
|
||||
"onnx_mask_input = low_res_logits"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b1823b37",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Transform the points as in the previous example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8885130f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_coord = np.concatenate([input_point, np.array([[0.0, 0.0]])], axis=0)[None, :, :]\n",
|
||||
"onnx_label = np.concatenate([input_label, np.array([-1])], axis=0)[None, :].astype(np.float32)\n",
|
||||
"\n",
|
||||
"onnx_coord = predictor.transform.apply_coords(onnx_coord, image.shape[:2]).astype(np.float32)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "28e47b69",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `has_mask_input` indicator is now 1."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3ab4483a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_has_mask_input = np.ones(1, dtype=np.float32)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d3781955",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Package inputs, then predict and threshold the mask."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0c1ec096",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ort_inputs = {\n",
|
||||
" \"image_embeddings\": image_embedding,\n",
|
||||
" \"point_coords\": onnx_coord,\n",
|
||||
" \"point_labels\": onnx_label,\n",
|
||||
" \"mask_input\": onnx_mask_input,\n",
|
||||
" \"has_mask_input\": onnx_has_mask_input,\n",
|
||||
" \"orig_im_size\": np.array(image.shape[:2], dtype=np.float32)\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"masks, _, _ = ort_session.run(None, ort_inputs)\n",
|
||||
"masks = masks > predictor.model.mask_threshold"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1e36554b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize=(10,10))\n",
|
||||
"plt.imshow(image)\n",
|
||||
"show_mask(masks, plt.gca())\n",
|
||||
"show_points(input_point, input_label, plt.gca())\n",
|
||||
"plt.axis('off')\n",
|
||||
"plt.show() "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2ef211d0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example box and point input"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51e58d2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"input_box = np.array([425, 600, 700, 875])\n",
|
||||
"input_point = np.array([[575, 750]])\n",
|
||||
"input_label = np.array([0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6e119dcb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Add a batch index, concatenate a box and point inputs, add the appropriate labels for the box corners, and transform. There is no padding point since the input includes a box input."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bfbe4911",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_box_coords = input_box.reshape(2, 2)\n",
|
||||
"onnx_box_labels = np.array([2,3])\n",
|
||||
"\n",
|
||||
"onnx_coord = np.concatenate([input_point, onnx_box_coords], axis=0)[None, :, :]\n",
|
||||
"onnx_label = np.concatenate([input_label, onnx_box_labels], axis=0)[None, :].astype(np.float32)\n",
|
||||
"\n",
|
||||
"onnx_coord = predictor.transform.apply_coords(onnx_coord, image.shape[:2]).astype(np.float32)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "65edabd2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Package inputs, then predict and threshold the mask."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2abfba56",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32)\n",
|
||||
"onnx_has_mask_input = np.zeros(1, dtype=np.float32)\n",
|
||||
"\n",
|
||||
"ort_inputs = {\n",
|
||||
" \"image_embeddings\": image_embedding,\n",
|
||||
" \"point_coords\": onnx_coord,\n",
|
||||
" \"point_labels\": onnx_label,\n",
|
||||
" \"mask_input\": onnx_mask_input,\n",
|
||||
" \"has_mask_input\": onnx_has_mask_input,\n",
|
||||
" \"orig_im_size\": np.array(image.shape[:2], dtype=np.float32)\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"masks, _, _ = ort_session.run(None, ort_inputs)\n",
|
||||
"masks = masks > predictor.model.mask_threshold"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8301bf33",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(image)\n",
|
||||
"show_mask(masks[0], plt.gca())\n",
|
||||
"show_box(input_box, plt.gca())\n",
|
||||
"show_points(input_point, input_label, plt.gca())\n",
|
||||
"plt.axis('off')\n",
|
||||
"plt.show()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
238
custom_nodes/was-node-suite-comfyui/repos/SAM/scripts/amg.py
Normal file
@@ -0,0 +1,238 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import cv2 # type: ignore
|
||||
|
||||
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
from typing import Any, Dict, List
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=(
|
||||
"Runs automatic mask generation on an input image or directory of images, "
|
||||
"and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
|
||||
"as well as pycocotools if saving in RLE format."
|
||||
)
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--input",
|
||||
type=str,
|
||||
required=True,
|
||||
help="Path to either a single input image or folder of images.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--output",
|
||||
type=str,
|
||||
required=True,
|
||||
help=(
|
||||
"Path to the directory where masks will be output. Output will be either a folder "
|
||||
"of PNGs per image or a single json with COCO-style masks."
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--model-type",
|
||||
type=str,
|
||||
required=True,
|
||||
help="The type of model to load, in ['default', 'vit_h', 'vit_l', 'vit_b']",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--checkpoint",
|
||||
type=str,
|
||||
required=True,
|
||||
help="The path to the SAM checkpoint to use for mask generation.",
|
||||
)
|
||||
|
||||
parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")
|
||||
|
||||
parser.add_argument(
|
||||
"--convert-to-rle",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
|
||||
"Requires pycocotools."
|
||||
),
|
||||
)
|
||||
|
||||
amg_settings = parser.add_argument_group("AMG Settings")
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--points-per-side",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Generate masks by sampling a grid over the image with this many points to a side.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--points-per-batch",
|
||||
type=int,
|
||||
default=None,
|
||||
help="How many input points to process simultaneously in one batch.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--pred-iou-thresh",
|
||||
type=float,
|
||||
default=None,
|
||||
help="Exclude masks with a predicted score from the model that is lower than this threshold.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--stability-score-thresh",
|
||||
type=float,
|
||||
default=None,
|
||||
help="Exclude masks with a stability score lower than this threshold.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--stability-score-offset",
|
||||
type=float,
|
||||
default=None,
|
||||
help="Larger values perturb the mask more when measuring stability score.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--box-nms-thresh",
|
||||
type=float,
|
||||
default=None,
|
||||
help="The overlap threshold for excluding a duplicate mask.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--crop-n-layers",
|
||||
type=int,
|
||||
default=None,
|
||||
help=(
|
||||
"If >0, mask generation is run on smaller crops of the image to generate more masks. "
|
||||
"The value sets how many different scales to crop at."
|
||||
),
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--crop-nms-thresh",
|
||||
type=float,
|
||||
default=None,
|
||||
help="The overlap threshold for excluding duplicate masks across different crops.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--crop-overlap-ratio",
|
||||
type=float,
|
||||
default=None,
|
||||
help="Larger numbers mean image crops will overlap more.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--crop-n-points-downscale-factor",
|
||||
type=int,
|
||||
default=None,
|
||||
help="The number of points-per-side in each layer of crop is reduced by this factor.",
|
||||
)
|
||||
|
||||
amg_settings.add_argument(
|
||||
"--min-mask-region-area",
|
||||
type=int,
|
||||
default=None,
|
||||
help=(
|
||||
"Disconnected mask regions or holes with area smaller than this value "
|
||||
"in pixels are removed by postprocessing."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
|
||||
header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h" # noqa
|
||||
metadata = [header]
|
||||
for i, mask_data in enumerate(masks):
|
||||
mask = mask_data["segmentation"]
|
||||
filename = f"{i}.png"
|
||||
cv2.imwrite(os.path.join(path, filename), mask * 255)
|
||||
mask_metadata = [
|
||||
str(i),
|
||||
str(mask_data["area"]),
|
||||
*[str(x) for x in mask_data["bbox"]],
|
||||
*[str(x) for x in mask_data["point_coords"][0]],
|
||||
str(mask_data["predicted_iou"]),
|
||||
str(mask_data["stability_score"]),
|
||||
*[str(x) for x in mask_data["crop_box"]],
|
||||
]
|
||||
row = ",".join(mask_metadata)
|
||||
metadata.append(row)
|
||||
metadata_path = os.path.join(path, "metadata.csv")
|
||||
with open(metadata_path, "w") as f:
|
||||
f.write("\n".join(metadata))
|
||||
|
||||
return
|
||||
|
||||
|
||||
def get_amg_kwargs(args):
|
||||
amg_kwargs = {
|
||||
"points_per_side": args.points_per_side,
|
||||
"points_per_batch": args.points_per_batch,
|
||||
"pred_iou_thresh": args.pred_iou_thresh,
|
||||
"stability_score_thresh": args.stability_score_thresh,
|
||||
"stability_score_offset": args.stability_score_offset,
|
||||
"box_nms_thresh": args.box_nms_thresh,
|
||||
"crop_n_layers": args.crop_n_layers,
|
||||
"crop_nms_thresh": args.crop_nms_thresh,
|
||||
"crop_overlap_ratio": args.crop_overlap_ratio,
|
||||
"crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
|
||||
"min_mask_region_area": args.min_mask_region_area,
|
||||
}
|
||||
amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
|
||||
return amg_kwargs
|
||||
|
||||
|
||||
def main(args: argparse.Namespace) -> None:
|
||||
print("Loading model...")
|
||||
sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
|
||||
_ = sam.to(device=args.device)
|
||||
output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
|
||||
amg_kwargs = get_amg_kwargs(args)
|
||||
generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)
|
||||
|
||||
if not os.path.isdir(args.input):
|
||||
targets = [args.input]
|
||||
else:
|
||||
targets = [
|
||||
f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))
|
||||
]
|
||||
targets = [os.path.join(args.input, f) for f in targets]
|
||||
|
||||
os.makedirs(args.output, exist_ok=True)
|
||||
|
||||
for t in targets:
|
||||
print(f"Processing '{t}'...")
|
||||
image = cv2.imread(t)
|
||||
if image is None:
|
||||
print(f"Could not load '{t}' as an image, skipping...")
|
||||
continue
|
||||
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
||||
|
||||
masks = generator.generate(image)
|
||||
|
||||
base = os.path.basename(t)
|
||||
base = os.path.splitext(base)[0]
|
||||
save_base = os.path.join(args.output, base)
|
||||
if output_mode == "binary_mask":
|
||||
os.makedirs(save_base, exist_ok=False)
|
||||
write_masks_to_folder(masks, save_base)
|
||||
else:
|
||||
save_file = save_base + ".json"
|
||||
with open(save_file, "w") as f:
|
||||
json.dump(masks, f)
|
||||
print("Done!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parser.parse_args()
|
||||
main(args)
|
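For reference, a minimal sketch of how the amg.py script above could be invoked from Python; the checkpoint file, model directory, and image folders are placeholders for illustration only, not files added by this commit.

# Hypothetical invocation of repos/SAM/scripts/amg.py; all paths are placeholders.
import subprocess

subprocess.run(
    [
        "python", "repos/SAM/scripts/amg.py",
        "--checkpoint", "models/sam_vit_h.pth",   # assumed locally downloaded SAM checkpoint
        "--model-type", "vit_h",
        "--input", "input_images/",               # a single image or a folder of images
        "--output", "output_masks/",              # one folder of PNGs plus metadata.csv per image
        "--points-per-side", "32",
    ],
    check=True,
)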
||||
@@ -0,0 +1,201 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
|
||||
from segment_anything import sam_model_registry
|
||||
from segment_anything.utils.onnx import SamOnnxModel
|
||||
|
||||
import argparse
|
||||
import warnings
|
||||
|
||||
try:
|
||||
import onnxruntime # type: ignore
|
||||
|
||||
onnxruntime_exists = True
|
||||
except ImportError:
|
||||
onnxruntime_exists = False
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Export the SAM prompt encoder and mask decoder to an ONNX model."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--output", type=str, required=True, help="The filename to save the ONNX model to."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--model-type",
|
||||
type=str,
|
||||
required=True,
|
||||
help="In ['default', 'vit_h', 'vit_l', 'vit_b']. Which type of SAM model to export.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--return-single-mask",
|
||||
action="store_true",
|
||||
help=(
|
||||
"If true, the exported ONNX model will only return the best mask, "
|
||||
"instead of returning multiple masks. For high resolution images "
|
||||
"this can improve runtime when upscaling masks is expensive."
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--opset",
|
||||
type=int,
|
||||
default=17,
|
||||
help="The ONNX opset version to use. Must be >=11",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--quantize-out",
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"If set, will quantize the model and save it with this name. "
|
||||
"Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize."
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--gelu-approximate",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Replace GELU operations with approximations using tanh. Useful "
|
||||
"for some runtimes that have slow or unimplemented erf ops, used in GELU."
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--use-stability-score",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Replaces the model's predicted mask quality score with the stability "
|
||||
"score calculated on the low resolution masks using an offset of 1.0. "
|
||||
),
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--return-extra-metrics",
|
||||
action="store_true",
|
||||
help=(
|
||||
"The model will return five results: (masks, scores, stability_scores, "
|
||||
"areas, low_res_logits) instead of the usual three. This can be "
|
||||
"significantly slower for high resolution outputs."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def run_export(
|
||||
model_type: str,
|
||||
checkpoint: str,
|
||||
output: str,
|
||||
opset: int,
|
||||
return_single_mask: bool,
|
||||
gelu_approximate: bool = False,
|
||||
use_stability_score: bool = False,
|
||||
return_extra_metrics=False,
|
||||
):
|
||||
print("Loading model...")
|
||||
sam = sam_model_registry[model_type](checkpoint=checkpoint)
|
||||
|
||||
onnx_model = SamOnnxModel(
|
||||
model=sam,
|
||||
return_single_mask=return_single_mask,
|
||||
use_stability_score=use_stability_score,
|
||||
return_extra_metrics=return_extra_metrics,
|
||||
)
|
||||
|
||||
if gelu_approximate:
|
||||
for n, m in onnx_model.named_modules():
|
||||
if isinstance(m, torch.nn.GELU):
|
||||
m.approximate = "tanh"
|
||||
|
||||
dynamic_axes = {
|
||||
"point_coords": {1: "num_points"},
|
||||
"point_labels": {1: "num_points"},
|
||||
}
|
||||
|
||||
embed_dim = sam.prompt_encoder.embed_dim
|
||||
embed_size = sam.prompt_encoder.image_embedding_size
|
||||
mask_input_size = [4 * x for x in embed_size]
|
||||
dummy_inputs = {
|
||||
"image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float),
|
||||
"point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float),
|
||||
"point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float),
|
||||
"mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float),
|
||||
"has_mask_input": torch.tensor([1], dtype=torch.float),
|
||||
"orig_im_size": torch.tensor([1500, 2250], dtype=torch.float),
|
||||
}
|
||||
|
||||
_ = onnx_model(**dummy_inputs)
|
||||
|
||||
output_names = ["masks", "iou_predictions", "low_res_masks"]
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
|
||||
warnings.filterwarnings("ignore", category=UserWarning)
|
||||
with open(output, "wb") as f:
|
||||
print(f"Exporting onnx model to {output}...")
|
||||
torch.onnx.export(
|
||||
onnx_model,
|
||||
tuple(dummy_inputs.values()),
|
||||
f,
|
||||
export_params=True,
|
||||
verbose=False,
|
||||
opset_version=opset,
|
||||
do_constant_folding=True,
|
||||
input_names=list(dummy_inputs.keys()),
|
||||
output_names=output_names,
|
||||
dynamic_axes=dynamic_axes,
|
||||
)
|
||||
|
||||
if onnxruntime_exists:
|
||||
ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()}
|
||||
# set cpu provider default
|
||||
providers = ["CPUExecutionProvider"]
|
||||
ort_session = onnxruntime.InferenceSession(output, providers=providers)
|
||||
_ = ort_session.run(None, ort_inputs)
|
||||
print("Model has successfully been run with ONNXRuntime.")
|
||||
|
||||
|
||||
def to_numpy(tensor):
|
||||
return tensor.cpu().numpy()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parser.parse_args()
|
||||
run_export(
|
||||
model_type=args.model_type,
|
||||
checkpoint=args.checkpoint,
|
||||
output=args.output,
|
||||
opset=args.opset,
|
||||
return_single_mask=args.return_single_mask,
|
||||
gelu_approximate=args.gelu_approximate,
|
||||
use_stability_score=args.use_stability_score,
|
||||
return_extra_metrics=args.return_extra_metrics,
|
||||
)
|
||||
|
||||
if args.quantize_out is not None:
|
||||
assert onnxruntime_exists, "onnxruntime is required to quantize the model."
|
||||
from onnxruntime.quantization import QuantType # type: ignore
|
||||
from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore
|
||||
|
||||
print(f"Quantizing model and writing to {args.quantize_out}...")
|
||||
quantize_dynamic(
|
||||
model_input=args.output,
|
||||
model_output=args.quantize_out,
|
||||
optimize_model=True,
|
||||
per_channel=False,
|
||||
reduce_range=False,
|
||||
weight_type=QuantType.QUInt8,
|
||||
)
|
||||
print("Done!")
|
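As a rough sketch of consuming the export, the following assumes the command above was run with --output sam_onnx.onnx (a placeholder name) for one of the standard backbones, so the decoder expects a 1x256x64x64 image embedding and a 256x256 low-resolution mask input; the random inputs are for a smoke test only, and real coordinates would first be mapped with the predictor's ResizeLongestSide transform.

import numpy as np
import onnxruntime

# Load the exported decoder and run it once with dummy inputs whose shapes match
# the dummy_inputs used during export above.
session = onnxruntime.InferenceSession("sam_onnx.onnx", providers=["CPUExecutionProvider"])
ort_inputs = {
    "image_embeddings": np.random.randn(1, 256, 64, 64).astype(np.float32),
    "point_coords": np.array([[[512.0, 512.0], [0.0, 0.0]]], dtype=np.float32),
    "point_labels": np.array([[1.0, -1.0]], dtype=np.float32),  # -1 marks the padding point
    "mask_input": np.zeros((1, 1, 256, 256), dtype=np.float32),
    "has_mask_input": np.zeros(1, dtype=np.float32),
    "orig_im_size": np.array([1024.0, 1024.0], dtype=np.float32),
}
masks, iou_predictions, low_res_masks = session.run(None, ort_inputs)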
||||
@@ -0,0 +1,15 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
from .build_sam import (
|
||||
build_sam,
|
||||
build_sam_vit_h,
|
||||
build_sam_vit_l,
|
||||
build_sam_vit_b,
|
||||
sam_model_registry,
|
||||
)
|
||||
from .predictor import SamPredictor
|
||||
from .automatic_mask_generator import SamAutomaticMaskGenerator
|
||||
@@ -0,0 +1,372 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
|
||||
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from .modeling import Sam
|
||||
from .predictor import SamPredictor
|
||||
from .utils.amg import (
|
||||
MaskData,
|
||||
area_from_rle,
|
||||
batch_iterator,
|
||||
batched_mask_to_box,
|
||||
box_xyxy_to_xywh,
|
||||
build_all_layer_point_grids,
|
||||
calculate_stability_score,
|
||||
coco_encode_rle,
|
||||
generate_crop_boxes,
|
||||
is_box_near_crop_edge,
|
||||
mask_to_rle_pytorch,
|
||||
remove_small_regions,
|
||||
rle_to_mask,
|
||||
uncrop_boxes_xyxy,
|
||||
uncrop_masks,
|
||||
uncrop_points,
|
||||
)
|
||||
|
||||
|
||||
class SamAutomaticMaskGenerator:
|
||||
def __init__(
|
||||
self,
|
||||
model: Sam,
|
||||
points_per_side: Optional[int] = 32,
|
||||
points_per_batch: int = 64,
|
||||
pred_iou_thresh: float = 0.88,
|
||||
stability_score_thresh: float = 0.95,
|
||||
stability_score_offset: float = 1.0,
|
||||
box_nms_thresh: float = 0.7,
|
||||
crop_n_layers: int = 0,
|
||||
crop_nms_thresh: float = 0.7,
|
||||
crop_overlap_ratio: float = 512 / 1500,
|
||||
crop_n_points_downscale_factor: int = 1,
|
||||
point_grids: Optional[List[np.ndarray]] = None,
|
||||
min_mask_region_area: int = 0,
|
||||
output_mode: str = "binary_mask",
|
||||
) -> None:
|
||||
"""
|
||||
Using a SAM model, generates masks for the entire image.
|
||||
Generates a grid of point prompts over the image, then filters
|
||||
low quality and duplicate masks. The default settings are chosen
|
||||
for SAM with a ViT-H backbone.
|
||||
|
||||
Arguments:
|
||||
model (Sam): The SAM model to use for mask prediction.
|
||||
points_per_side (int or None): The number of points to be sampled
|
||||
along one side of the image. The total number of points is
|
||||
points_per_side**2. If None, 'point_grids' must provide explicit
|
||||
point sampling.
|
||||
points_per_batch (int): Sets the number of points run simultaneously
|
||||
by the model. Higher numbers may be faster but use more GPU memory.
|
||||
pred_iou_thresh (float): A filtering threshold in [0,1], using the
|
||||
model's predicted mask quality.
|
||||
stability_score_thresh (float): A filtering threshold in [0,1], using
|
||||
the stability of the mask under changes to the cutoff used to binarize
|
||||
the model's mask predictions.
|
||||
stability_score_offset (float): The amount to shift the cutoff when
|
||||
calculating the stability score.
|
||||
box_nms_thresh (float): The box IoU cutoff used by non-maximal
|
||||
suppression to filter duplicate masks.
|
||||
crop_n_layers (int): If >0, mask prediction will be run again on
|
||||
crops of the image. Sets the number of layers to run, where each
|
||||
layer has 2**i_layer number of image crops.
|
||||
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
|
||||
suppression to filter duplicate masks between different crops.
|
||||
crop_overlap_ratio (float): Sets the degree to which crops overlap.
|
||||
In the first crop layer, crops will overlap by this fraction of
|
||||
the image length. Later layers with more crops scale down this overlap.
|
||||
crop_n_points_downscale_factor (int): The number of points-per-side
|
||||
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
|
||||
point_grids (list(np.ndarray) or None): A list over explicit grids
|
||||
of points used for sampling, normalized to [0,1]. The nth grid in the
|
||||
list is used in the nth crop layer. Exclusive with points_per_side.
|
||||
min_mask_region_area (int): If >0, postprocessing will be applied
|
||||
to remove disconnected regions and holes in masks with area smaller
|
||||
than min_mask_region_area. Requires opencv.
|
||||
output_mode (str): The form masks are returned in. Can be 'binary_mask',
|
||||
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
|
||||
For large resolutions, 'binary_mask' may consume large amounts of
|
||||
memory.
|
||||
"""
|
||||
|
||||
assert (points_per_side is None) != (
|
||||
point_grids is None
|
||||
), "Exactly one of points_per_side or point_grid must be provided."
|
||||
if points_per_side is not None:
|
||||
self.point_grids = build_all_layer_point_grids(
|
||||
points_per_side,
|
||||
crop_n_layers,
|
||||
crop_n_points_downscale_factor,
|
||||
)
|
||||
elif point_grids is not None:
|
||||
self.point_grids = point_grids
|
||||
else:
|
||||
raise ValueError("Can't have both points_per_side and point_grid be None.")
|
||||
|
||||
assert output_mode in [
|
||||
"binary_mask",
|
||||
"uncompressed_rle",
|
||||
"coco_rle",
|
||||
], f"Unknown output_mode {output_mode}."
|
||||
if output_mode == "coco_rle":
|
||||
from pycocotools import mask as mask_utils # type: ignore # noqa: F401
|
||||
|
||||
if min_mask_region_area > 0:
|
||||
import cv2 # type: ignore # noqa: F401
|
||||
|
||||
self.predictor = SamPredictor(model)
|
||||
self.points_per_batch = points_per_batch
|
||||
self.pred_iou_thresh = pred_iou_thresh
|
||||
self.stability_score_thresh = stability_score_thresh
|
||||
self.stability_score_offset = stability_score_offset
|
||||
self.box_nms_thresh = box_nms_thresh
|
||||
self.crop_n_layers = crop_n_layers
|
||||
self.crop_nms_thresh = crop_nms_thresh
|
||||
self.crop_overlap_ratio = crop_overlap_ratio
|
||||
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
|
||||
self.min_mask_region_area = min_mask_region_area
|
||||
self.output_mode = output_mode
|
||||
|
||||
@torch.no_grad()
|
||||
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Generates masks for the given image.
|
||||
|
||||
Arguments:
|
||||
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
|
||||
|
||||
Returns:
|
||||
list(dict(str, any)): A list over records for masks. Each record is
|
||||
a dict containing the following keys:
|
||||
segmentation (dict(str, any) or np.ndarray): The mask. If
|
||||
output_mode='binary_mask', is an array of shape HW. Otherwise,
|
||||
is a dictionary containing the RLE.
|
||||
bbox (list(float)): The box around the mask, in XYWH format.
|
||||
area (int): The area in pixels of the mask.
|
||||
predicted_iou (float): The model's own prediction of the mask's
|
||||
quality. This is filtered by the pred_iou_thresh parameter.
|
||||
point_coords (list(list(float))): The point coordinates input
|
||||
to the model to generate this mask.
|
||||
stability_score (float): A measure of the mask's quality. This
|
||||
is filtered using the stability_score_thresh parameter.
|
||||
crop_box (list(float)): The crop of the image used to generate
|
||||
the mask, given in XYWH format.
|
||||
"""
|
||||
|
||||
# Generate masks
|
||||
mask_data = self._generate_masks(image)
|
||||
|
||||
# Filter small disconnected regions and holes in masks
|
||||
if self.min_mask_region_area > 0:
|
||||
mask_data = self.postprocess_small_regions(
|
||||
mask_data,
|
||||
self.min_mask_region_area,
|
||||
max(self.box_nms_thresh, self.crop_nms_thresh),
|
||||
)
|
||||
|
||||
# Encode masks
|
||||
if self.output_mode == "coco_rle":
|
||||
mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
|
||||
elif self.output_mode == "binary_mask":
|
||||
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
|
||||
else:
|
||||
mask_data["segmentations"] = mask_data["rles"]
|
||||
|
||||
# Write mask records
|
||||
curr_anns = []
|
||||
for idx in range(len(mask_data["segmentations"])):
|
||||
ann = {
|
||||
"segmentation": mask_data["segmentations"][idx],
|
||||
"area": area_from_rle(mask_data["rles"][idx]),
|
||||
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
|
||||
"predicted_iou": mask_data["iou_preds"][idx].item(),
|
||||
"point_coords": [mask_data["points"][idx].tolist()],
|
||||
"stability_score": mask_data["stability_score"][idx].item(),
|
||||
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
|
||||
}
|
||||
curr_anns.append(ann)
|
||||
|
||||
return curr_anns
|
||||
|
||||
def _generate_masks(self, image: np.ndarray) -> MaskData:
|
||||
orig_size = image.shape[:2]
|
||||
crop_boxes, layer_idxs = generate_crop_boxes(
|
||||
orig_size, self.crop_n_layers, self.crop_overlap_ratio
|
||||
)
|
||||
|
||||
# Iterate over image crops
|
||||
data = MaskData()
|
||||
for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
|
||||
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
|
||||
data.cat(crop_data)
|
||||
|
||||
# Remove duplicate masks between crops
|
||||
if len(crop_boxes) > 1:
|
||||
# Prefer masks from smaller crops
|
||||
scores = 1 / box_area(data["crop_boxes"])
|
||||
scores = scores.to(data["boxes"].device)
|
||||
keep_by_nms = batched_nms(
|
||||
data["boxes"].float(),
|
||||
scores,
|
||||
torch.zeros_like(data["boxes"][:, 0]), # categories
|
||||
iou_threshold=self.crop_nms_thresh,
|
||||
)
|
||||
data.filter(keep_by_nms)
|
||||
|
||||
data.to_numpy()
|
||||
return data
|
||||
|
||||
def _process_crop(
|
||||
self,
|
||||
image: np.ndarray,
|
||||
crop_box: List[int],
|
||||
crop_layer_idx: int,
|
||||
orig_size: Tuple[int, ...],
|
||||
) -> MaskData:
|
||||
# Crop the image and calculate embeddings
|
||||
x0, y0, x1, y1 = crop_box
|
||||
cropped_im = image[y0:y1, x0:x1, :]
|
||||
cropped_im_size = cropped_im.shape[:2]
|
||||
self.predictor.set_image(cropped_im)
|
||||
|
||||
# Get points for this crop
|
||||
points_scale = np.array(cropped_im_size)[None, ::-1]
|
||||
points_for_image = self.point_grids[crop_layer_idx] * points_scale
|
||||
|
||||
# Generate masks for this crop in batches
|
||||
data = MaskData()
|
||||
for (points,) in batch_iterator(self.points_per_batch, points_for_image):
|
||||
batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)
|
||||
data.cat(batch_data)
|
||||
del batch_data
|
||||
self.predictor.reset_image()
|
||||
|
||||
# Remove duplicates within this crop.
|
||||
keep_by_nms = batched_nms(
|
||||
data["boxes"].float(),
|
||||
data["iou_preds"],
|
||||
torch.zeros_like(data["boxes"][:, 0]), # categories
|
||||
iou_threshold=self.box_nms_thresh,
|
||||
)
|
||||
data.filter(keep_by_nms)
|
||||
|
||||
# Return to the original image frame
|
||||
data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
|
||||
data["points"] = uncrop_points(data["points"], crop_box)
|
||||
data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
|
||||
|
||||
return data
|
||||
|
||||
def _process_batch(
|
||||
self,
|
||||
points: np.ndarray,
|
||||
im_size: Tuple[int, ...],
|
||||
crop_box: List[int],
|
||||
orig_size: Tuple[int, ...],
|
||||
) -> MaskData:
|
||||
orig_h, orig_w = orig_size
|
||||
|
||||
# Run model on this batch
|
||||
transformed_points = self.predictor.transform.apply_coords(points, im_size)
|
||||
in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
|
||||
in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
|
||||
masks, iou_preds, _ = self.predictor.predict_torch(
|
||||
in_points[:, None, :],
|
||||
in_labels[:, None],
|
||||
multimask_output=True,
|
||||
return_logits=True,
|
||||
)
|
||||
|
||||
# Serialize predictions and store in MaskData
|
||||
data = MaskData(
|
||||
masks=masks.flatten(0, 1),
|
||||
iou_preds=iou_preds.flatten(0, 1),
|
||||
points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
|
||||
)
|
||||
del masks
|
||||
|
||||
# Filter by predicted IoU
|
||||
if self.pred_iou_thresh > 0.0:
|
||||
keep_mask = data["iou_preds"] > self.pred_iou_thresh
|
||||
data.filter(keep_mask)
|
||||
|
||||
# Calculate stability score
|
||||
data["stability_score"] = calculate_stability_score(
|
||||
data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
|
||||
)
|
||||
if self.stability_score_thresh > 0.0:
|
||||
keep_mask = data["stability_score"] >= self.stability_score_thresh
|
||||
data.filter(keep_mask)
|
||||
|
||||
# Threshold masks and calculate boxes
|
||||
data["masks"] = data["masks"] > self.predictor.model.mask_threshold
|
||||
data["boxes"] = batched_mask_to_box(data["masks"])
|
||||
|
||||
# Filter boxes that touch crop boundaries
|
||||
keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
|
||||
if not torch.all(keep_mask):
|
||||
data.filter(keep_mask)
|
||||
|
||||
# Compress to RLE
|
||||
data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
|
||||
data["rles"] = mask_to_rle_pytorch(data["masks"])
|
||||
del data["masks"]
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def postprocess_small_regions(
|
||||
mask_data: MaskData, min_area: int, nms_thresh: float
|
||||
) -> MaskData:
|
||||
"""
|
||||
Removes small disconnected regions and holes in masks, then reruns
|
||||
box NMS to remove any new duplicates.
|
||||
|
||||
Edits mask_data in place.
|
||||
|
||||
Requires open-cv as a dependency.
|
||||
"""
|
||||
if len(mask_data["rles"]) == 0:
|
||||
return mask_data
|
||||
|
||||
# Filter small disconnected regions and holes
|
||||
new_masks = []
|
||||
scores = []
|
||||
for rle in mask_data["rles"]:
|
||||
mask = rle_to_mask(rle)
|
||||
|
||||
mask, changed = remove_small_regions(mask, min_area, mode="holes")
|
||||
unchanged = not changed
|
||||
mask, changed = remove_small_regions(mask, min_area, mode="islands")
|
||||
unchanged = unchanged and not changed
|
||||
|
||||
new_masks.append(torch.as_tensor(mask).unsqueeze(0))
|
||||
# Give score=0 to changed masks and score=1 to unchanged masks
|
||||
# so NMS will prefer ones that didn't need postprocessing
|
||||
scores.append(float(unchanged))
|
||||
|
||||
# Recalculate boxes and remove any new duplicates
|
||||
masks = torch.cat(new_masks, dim=0)
|
||||
boxes = batched_mask_to_box(masks)
|
||||
keep_by_nms = batched_nms(
|
||||
boxes.float(),
|
||||
torch.as_tensor(scores),
|
||||
torch.zeros_like(boxes[:, 0]), # categories
|
||||
iou_threshold=nms_thresh,
|
||||
)
|
||||
|
||||
# Only recalculate RLEs for masks that have changed
|
||||
for i_mask in keep_by_nms:
|
||||
if scores[i_mask] == 0.0:
|
||||
mask_torch = masks[i_mask].unsqueeze(0)
|
||||
mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
|
||||
mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
|
||||
mask_data.filter(keep_by_nms)
|
||||
|
||||
return mask_data
|
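A minimal sketch of driving the class above end to end; the checkpoint path and image file are placeholders, and the record keys follow the generate() docstring.

import cv2
import numpy as np
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry

sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")  # placeholder checkpoint path
generator = SamAutomaticMaskGenerator(sam, points_per_side=16, min_mask_region_area=100)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
for i, record in enumerate(generator.generate(image)):
    # In the default "binary_mask" mode, record["segmentation"] is an HxW bool array.
    cv2.imwrite(f"mask_{i}.png", record["segmentation"].astype(np.uint8) * 255)
    print(i, record["area"], record["predicted_iou"], record["stability_score"])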
||||
@@ -0,0 +1,107 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
|
||||
from functools import partial
|
||||
|
||||
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
|
||||
|
||||
|
||||
def build_sam_vit_h(checkpoint=None):
|
||||
return _build_sam(
|
||||
encoder_embed_dim=1280,
|
||||
encoder_depth=32,
|
||||
encoder_num_heads=16,
|
||||
encoder_global_attn_indexes=[7, 15, 23, 31],
|
||||
checkpoint=checkpoint,
|
||||
)
|
||||
|
||||
|
||||
build_sam = build_sam_vit_h
|
||||
|
||||
|
||||
def build_sam_vit_l(checkpoint=None):
|
||||
return _build_sam(
|
||||
encoder_embed_dim=1024,
|
||||
encoder_depth=24,
|
||||
encoder_num_heads=16,
|
||||
encoder_global_attn_indexes=[5, 11, 17, 23],
|
||||
checkpoint=checkpoint,
|
||||
)
|
||||
|
||||
|
||||
def build_sam_vit_b(checkpoint=None):
|
||||
return _build_sam(
|
||||
encoder_embed_dim=768,
|
||||
encoder_depth=12,
|
||||
encoder_num_heads=12,
|
||||
encoder_global_attn_indexes=[2, 5, 8, 11],
|
||||
checkpoint=checkpoint,
|
||||
)
|
||||
|
||||
|
||||
sam_model_registry = {
|
||||
"default": build_sam_vit_h,
|
||||
"vit_h": build_sam_vit_h,
|
||||
"vit_l": build_sam_vit_l,
|
||||
"vit_b": build_sam_vit_b,
|
||||
}
|
||||
|
||||
|
||||
def _build_sam(
|
||||
encoder_embed_dim,
|
||||
encoder_depth,
|
||||
encoder_num_heads,
|
||||
encoder_global_attn_indexes,
|
||||
checkpoint=None,
|
||||
):
|
||||
prompt_embed_dim = 256
|
||||
image_size = 1024
|
||||
vit_patch_size = 16
|
||||
image_embedding_size = image_size // vit_patch_size
|
||||
sam = Sam(
|
||||
image_encoder=ImageEncoderViT(
|
||||
depth=encoder_depth,
|
||||
embed_dim=encoder_embed_dim,
|
||||
img_size=image_size,
|
||||
mlp_ratio=4,
|
||||
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
|
||||
num_heads=encoder_num_heads,
|
||||
patch_size=vit_patch_size,
|
||||
qkv_bias=True,
|
||||
use_rel_pos=True,
|
||||
global_attn_indexes=encoder_global_attn_indexes,
|
||||
window_size=14,
|
||||
out_chans=prompt_embed_dim,
|
||||
),
|
||||
prompt_encoder=PromptEncoder(
|
||||
embed_dim=prompt_embed_dim,
|
||||
image_embedding_size=(image_embedding_size, image_embedding_size),
|
||||
input_image_size=(image_size, image_size),
|
||||
mask_in_chans=16,
|
||||
),
|
||||
mask_decoder=MaskDecoder(
|
||||
num_multimask_outputs=3,
|
||||
transformer=TwoWayTransformer(
|
||||
depth=2,
|
||||
embedding_dim=prompt_embed_dim,
|
||||
mlp_dim=2048,
|
||||
num_heads=8,
|
||||
),
|
||||
transformer_dim=prompt_embed_dim,
|
||||
iou_head_depth=3,
|
||||
iou_head_hidden_dim=256,
|
||||
),
|
||||
pixel_mean=[123.675, 116.28, 103.53],
|
||||
pixel_std=[58.395, 57.12, 57.375],
|
||||
)
|
||||
sam.eval()
|
||||
if checkpoint is not None:
|
||||
with open(checkpoint, "rb") as f:
|
||||
state_dict = torch.load(f)
|
||||
sam.load_state_dict(state_dict)
|
||||
return sam
|
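In short, sam_model_registry maps a model-type string to one of the builders above; a minimal sketch, with the checkpoint filename as a placeholder:

import torch
from segment_anything import sam_model_registry

# "default" resolves to the ViT-H builder; "vit_l" and "vit_b" select the smaller variants.
sam = sam_model_registry["vit_l"](checkpoint="sam_vit_l.pth")  # placeholder checkpoint path
sam.to(device="cuda" if torch.cuda.is_available() else "cpu")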
||||
@@ -0,0 +1,11 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
from .sam import Sam
|
||||
from .image_encoder import ImageEncoderViT
|
||||
from .mask_decoder import MaskDecoder
|
||||
from .prompt_encoder import PromptEncoder
|
||||
from .transformer import TwoWayTransformer
|
||||
@@ -0,0 +1,43 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from typing import Type
|
||||
|
||||
|
||||
class MLPBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
embedding_dim: int,
|
||||
mlp_dim: int,
|
||||
act: Type[nn.Module] = nn.GELU,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
|
||||
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
|
||||
self.act = act()
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
return self.lin2(self.act(self.lin1(x)))
|
||||
|
||||
|
||||
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
|
||||
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
|
||||
class LayerNorm2d(nn.Module):
|
||||
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
|
||||
super().__init__()
|
||||
self.weight = nn.Parameter(torch.ones(num_channels))
|
||||
self.bias = nn.Parameter(torch.zeros(num_channels))
|
||||
self.eps = eps
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
u = x.mean(1, keepdim=True)
|
||||
s = (x - u).pow(2).mean(1, keepdim=True)
|
||||
x = (x - u) / torch.sqrt(s + self.eps)
|
||||
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
||||
return x
|
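LayerNorm2d normalizes over the channel dimension of an NCHW tensor, which plain nn.LayerNorm would only do after permuting to channels-last; a small sketch of the expected behaviour:

import torch
from segment_anything.modeling.common import LayerNorm2d

norm = LayerNorm2d(num_channels=256)
x = torch.randn(2, 256, 64, 64)  # N, C, H, W
y = norm(x)
# Per-pixel channel statistics come out roughly zero-mean with unit (biased) variance.
print(y.shape, y.mean(dim=1).abs().max().item(), y.var(dim=1, unbiased=False).mean().item())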
||||
@@ -0,0 +1,395 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from typing import Optional, Tuple, Type
|
||||
|
||||
from .common import LayerNorm2d, MLPBlock
|
||||
|
||||
|
||||
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
|
||||
class ImageEncoderViT(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
img_size: int = 1024,
|
||||
patch_size: int = 16,
|
||||
in_chans: int = 3,
|
||||
embed_dim: int = 768,
|
||||
depth: int = 12,
|
||||
num_heads: int = 12,
|
||||
mlp_ratio: float = 4.0,
|
||||
out_chans: int = 256,
|
||||
qkv_bias: bool = True,
|
||||
norm_layer: Type[nn.Module] = nn.LayerNorm,
|
||||
act_layer: Type[nn.Module] = nn.GELU,
|
||||
use_abs_pos: bool = True,
|
||||
use_rel_pos: bool = False,
|
||||
rel_pos_zero_init: bool = True,
|
||||
window_size: int = 0,
|
||||
global_attn_indexes: Tuple[int, ...] = (),
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
img_size (int): Input image size.
|
||||
patch_size (int): Patch size.
|
||||
in_chans (int): Number of input image channels.
|
||||
embed_dim (int): Patch embedding dimension.
|
||||
depth (int): Depth of ViT.
|
||||
num_heads (int): Number of attention heads in each ViT block.
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
||||
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
||||
norm_layer (nn.Module): Normalization layer.
|
||||
act_layer (nn.Module): Activation layer.
|
||||
use_abs_pos (bool): If True, use absolute positional embeddings.
|
||||
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
||||
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
||||
window_size (int): Window size for window attention blocks.
|
||||
global_attn_indexes (list): Indexes for blocks using global attention.
|
||||
"""
|
||||
super().__init__()
|
||||
self.img_size = img_size
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
kernel_size=(patch_size, patch_size),
|
||||
stride=(patch_size, patch_size),
|
||||
in_chans=in_chans,
|
||||
embed_dim=embed_dim,
|
||||
)
|
||||
|
||||
self.pos_embed: Optional[nn.Parameter] = None
|
||||
if use_abs_pos:
|
||||
# Initialize absolute positional embedding with pretrain image size.
|
||||
self.pos_embed = nn.Parameter(
|
||||
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
|
||||
)
|
||||
|
||||
self.blocks = nn.ModuleList()
|
||||
for i in range(depth):
|
||||
block = Block(
|
||||
dim=embed_dim,
|
||||
num_heads=num_heads,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias,
|
||||
norm_layer=norm_layer,
|
||||
act_layer=act_layer,
|
||||
use_rel_pos=use_rel_pos,
|
||||
rel_pos_zero_init=rel_pos_zero_init,
|
||||
window_size=window_size if i not in global_attn_indexes else 0,
|
||||
input_size=(img_size // patch_size, img_size // patch_size),
|
||||
)
|
||||
self.blocks.append(block)
|
||||
|
||||
self.neck = nn.Sequential(
|
||||
nn.Conv2d(
|
||||
embed_dim,
|
||||
out_chans,
|
||||
kernel_size=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(out_chans),
|
||||
nn.Conv2d(
|
||||
out_chans,
|
||||
out_chans,
|
||||
kernel_size=3,
|
||||
padding=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(out_chans),
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
x = self.patch_embed(x)
|
||||
if self.pos_embed is not None:
|
||||
x = x + self.pos_embed
|
||||
|
||||
for blk in self.blocks:
|
||||
x = blk(x)
|
||||
|
||||
x = self.neck(x.permute(0, 3, 1, 2))
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class Block(nn.Module):
|
||||
"""Transformer blocks with support of window attention and residual propagation blocks"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
num_heads: int,
|
||||
mlp_ratio: float = 4.0,
|
||||
qkv_bias: bool = True,
|
||||
norm_layer: Type[nn.Module] = nn.LayerNorm,
|
||||
act_layer: Type[nn.Module] = nn.GELU,
|
||||
use_rel_pos: bool = False,
|
||||
rel_pos_zero_init: bool = True,
|
||||
window_size: int = 0,
|
||||
input_size: Optional[Tuple[int, int]] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
num_heads (int): Number of attention heads in each ViT block.
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
||||
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
||||
norm_layer (nn.Module): Normalization layer.
|
||||
act_layer (nn.Module): Activation layer.
|
||||
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
||||
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
||||
window_size (int): Window size for window attention blocks. If it equals 0, then
|
||||
use global attention.
|
||||
input_size (tuple(int, int) or None): Input resolution for calculating the relative
|
||||
positional parameter size.
|
||||
"""
|
||||
super().__init__()
|
||||
self.norm1 = norm_layer(dim)
|
||||
self.attn = Attention(
|
||||
dim,
|
||||
num_heads=num_heads,
|
||||
qkv_bias=qkv_bias,
|
||||
use_rel_pos=use_rel_pos,
|
||||
rel_pos_zero_init=rel_pos_zero_init,
|
||||
input_size=input_size if window_size == 0 else (window_size, window_size),
|
||||
)
|
||||
|
||||
self.norm2 = norm_layer(dim)
|
||||
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
|
||||
|
||||
self.window_size = window_size
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
shortcut = x
|
||||
x = self.norm1(x)
|
||||
# Window partition
|
||||
if self.window_size > 0:
|
||||
H, W = x.shape[1], x.shape[2]
|
||||
x, pad_hw = window_partition(x, self.window_size)
|
||||
|
||||
x = self.attn(x)
|
||||
# Reverse window partition
|
||||
if self.window_size > 0:
|
||||
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
|
||||
|
||||
x = shortcut + x
|
||||
x = x + self.mlp(self.norm2(x))
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class Attention(nn.Module):
|
||||
"""Multi-head Attention block with relative position embeddings."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
num_heads: int = 8,
|
||||
qkv_bias: bool = True,
|
||||
use_rel_pos: bool = False,
|
||||
rel_pos_zero_init: bool = True,
|
||||
input_size: Optional[Tuple[int, int]] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
num_heads (int): Number of attention heads.
|
||||
qkv_bias (bool): If True, add a learnable bias to query, key, value.
|
||||
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
|
||||
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
|
||||
input_size (tuple(int, int) or None): Input resolution for calculating the relative
|
||||
positional parameter size.
|
||||
"""
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
head_dim = dim // num_heads
|
||||
self.scale = head_dim**-0.5
|
||||
|
||||
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
||||
self.proj = nn.Linear(dim, dim)
|
||||
|
||||
self.use_rel_pos = use_rel_pos
|
||||
if self.use_rel_pos:
|
||||
assert (
|
||||
input_size is not None
|
||||
), "Input size must be provided if using relative positional encoding."
|
||||
# initialize relative positional embeddings
|
||||
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
|
||||
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
B, H, W, _ = x.shape
|
||||
# qkv with shape (3, B, nHead, H * W, C)
|
||||
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
||||
# q, k, v with shape (B * nHead, H * W, C)
|
||||
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
|
||||
|
||||
attn = (q * self.scale) @ k.transpose(-2, -1)
|
||||
|
||||
if self.use_rel_pos:
|
||||
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
|
||||
|
||||
attn = attn.softmax(dim=-1)
|
||||
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
|
||||
x = self.proj(x)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
|
||||
"""
|
||||
Partition into non-overlapping windows with padding if needed.
|
||||
Args:
|
||||
x (tensor): input tokens with [B, H, W, C].
|
||||
window_size (int): window size.
|
||||
|
||||
Returns:
|
||||
windows: windows after partition with [B * num_windows, window_size, window_size, C].
|
||||
(Hp, Wp): padded height and width before partition
|
||||
"""
|
||||
B, H, W, C = x.shape
|
||||
|
||||
pad_h = (window_size - H % window_size) % window_size
|
||||
pad_w = (window_size - W % window_size) % window_size
|
||||
if pad_h > 0 or pad_w > 0:
|
||||
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
|
||||
Hp, Wp = H + pad_h, W + pad_w
|
||||
|
||||
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
|
||||
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
||||
return windows, (Hp, Wp)
|
||||
|
||||
|
||||
def window_unpartition(
|
||||
windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Window unpartition into original sequences and remove padding.
|
||||
Args:
|
||||
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
|
||||
window_size (int): window size.
|
||||
pad_hw (Tuple): padded height and width (Hp, Wp).
|
||||
hw (Tuple): original height and width (H, W) before padding.
|
||||
|
||||
Returns:
|
||||
x: unpartitioned sequences with [B, H, W, C].
|
||||
"""
|
||||
Hp, Wp = pad_hw
|
||||
H, W = hw
|
||||
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
|
||||
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
|
||||
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
|
||||
|
||||
if Hp > H or Wp > W:
|
||||
x = x[:, :H, :W, :].contiguous()
|
||||
return x
|
||||
|
||||
|
||||
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Get relative positional embeddings according to the relative positions of
|
||||
query and key sizes.
|
||||
Args:
|
||||
q_size (int): size of query q.
|
||||
k_size (int): size of key k.
|
||||
rel_pos (Tensor): relative position embeddings (L, C).
|
||||
|
||||
Returns:
|
||||
Extracted positional embeddings according to relative positions.
|
||||
"""
|
||||
max_rel_dist = int(2 * max(q_size, k_size) - 1)
|
||||
# Interpolate rel pos if needed.
|
||||
if rel_pos.shape[0] != max_rel_dist:
|
||||
# Interpolate rel pos.
|
||||
rel_pos_resized = F.interpolate(
|
||||
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
|
||||
size=max_rel_dist,
|
||||
mode="linear",
|
||||
)
|
||||
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
|
||||
else:
|
||||
rel_pos_resized = rel_pos
|
||||
|
||||
# Scale the coords with short length if shapes for q and k are different.
|
||||
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
|
||||
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
|
||||
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
|
||||
|
||||
return rel_pos_resized[relative_coords.long()]
|
||||
|
||||
|
||||
def add_decomposed_rel_pos(
|
||||
attn: torch.Tensor,
|
||||
q: torch.Tensor,
|
||||
rel_pos_h: torch.Tensor,
|
||||
rel_pos_w: torch.Tensor,
|
||||
q_size: Tuple[int, int],
|
||||
k_size: Tuple[int, int],
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
|
||||
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
|
||||
Args:
|
||||
attn (Tensor): attention map.
|
||||
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
|
||||
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
|
||||
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
|
||||
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
|
||||
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
|
||||
|
||||
Returns:
|
||||
attn (Tensor): attention map with added relative positional embeddings.
|
||||
"""
|
||||
q_h, q_w = q_size
|
||||
k_h, k_w = k_size
|
||||
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
|
||||
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
|
||||
|
||||
B, _, dim = q.shape
|
||||
r_q = q.reshape(B, q_h, q_w, dim)
|
||||
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
|
||||
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
|
||||
|
||||
attn = (
|
||||
attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
|
||||
).view(B, q_h * q_w, k_h * k_w)
|
||||
|
||||
return attn
|
||||
|
||||
|
||||
class PatchEmbed(nn.Module):
|
||||
"""
|
||||
Image to Patch Embedding.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
kernel_size: Tuple[int, int] = (16, 16),
|
||||
stride: Tuple[int, int] = (16, 16),
|
||||
padding: Tuple[int, int] = (0, 0),
|
||||
in_chans: int = 3,
|
||||
embed_dim: int = 768,
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
kernel_size (Tuple): kernel size of the projection layer.
|
||||
stride (Tuple): stride of the projection layer.
|
||||
padding (Tuple): padding size of the projection layer.
|
||||
in_chans (int): Number of input image channels.
|
||||
embed_dim (int): Patch embedding dimension.
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.proj = nn.Conv2d(
|
||||
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
x = self.proj(x)
|
||||
# B C H W -> B H W C
|
||||
x = x.permute(0, 2, 3, 1)
|
||||
return x
|
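window_partition pads the token grid to a multiple of the window size and window_unpartition crops the padding back off, so the round trip is lossless; a quick sketch:

import torch
from segment_anything.modeling.image_encoder import window_partition, window_unpartition

x = torch.randn(1, 64, 64, 768)  # B, H, W, C token grid
windows, pad_hw = window_partition(x, window_size=14)  # pads 64 -> 70, yielding (25, 14, 14, 768)
x_back = window_unpartition(windows, 14, pad_hw, (64, 64))
assert torch.equal(x, x_back)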
||||
@@ -0,0 +1,176 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.nn import functional as F
|
||||
|
||||
from typing import List, Tuple, Type
|
||||
|
||||
from .common import LayerNorm2d
|
||||
|
||||
|
||||
class MaskDecoder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
transformer_dim: int,
|
||||
transformer: nn.Module,
|
||||
num_multimask_outputs: int = 3,
|
||||
activation: Type[nn.Module] = nn.GELU,
|
||||
iou_head_depth: int = 3,
|
||||
iou_head_hidden_dim: int = 256,
|
||||
) -> None:
|
||||
"""
|
||||
Predicts masks given an image and prompt embeddings, using a
|
||||
transformer architecture.
|
||||
|
||||
Arguments:
|
||||
transformer_dim (int): the channel dimension of the transformer
|
||||
transformer (nn.Module): the transformer used to predict masks
|
||||
num_multimask_outputs (int): the number of masks to predict
|
||||
when disambiguating masks
|
||||
activation (nn.Module): the type of activation to use when
|
||||
upscaling masks
|
||||
iou_head_depth (int): the depth of the MLP used to predict
|
||||
mask quality
|
||||
iou_head_hidden_dim (int): the hidden dimension of the MLP
|
||||
used to predict mask quality
|
||||
"""
|
||||
super().__init__()
|
||||
self.transformer_dim = transformer_dim
|
||||
self.transformer = transformer
|
||||
|
||||
self.num_multimask_outputs = num_multimask_outputs
|
||||
|
||||
self.iou_token = nn.Embedding(1, transformer_dim)
|
||||
self.num_mask_tokens = num_multimask_outputs + 1
|
||||
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
|
||||
|
||||
self.output_upscaling = nn.Sequential(
|
||||
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
|
||||
LayerNorm2d(transformer_dim // 4),
|
||||
activation(),
|
||||
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
|
||||
activation(),
|
||||
)
|
||||
self.output_hypernetworks_mlps = nn.ModuleList(
|
||||
[
|
||||
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
|
||||
for i in range(self.num_mask_tokens)
|
||||
]
|
||||
)
|
||||
|
||||
self.iou_prediction_head = MLP(
|
||||
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
multimask_output: bool,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Predict masks given image and prompt embeddings.
|
||||
|
||||
Arguments:
|
||||
image_embeddings (torch.Tensor): the embeddings from the image encoder
|
||||
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
|
||||
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
|
||||
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
|
||||
multimask_output (bool): Whether to return multiple masks or a single
|
||||
mask.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: batched predicted masks
|
||||
torch.Tensor: batched predictions of mask quality
|
||||
"""
|
||||
masks, iou_pred = self.predict_masks(
|
||||
image_embeddings=image_embeddings,
|
||||
image_pe=image_pe,
|
||||
sparse_prompt_embeddings=sparse_prompt_embeddings,
|
||||
dense_prompt_embeddings=dense_prompt_embeddings,
|
||||
)
|
||||
|
||||
# Select the correct mask or masks for output
|
||||
if multimask_output:
|
||||
mask_slice = slice(1, None)
|
||||
else:
|
||||
mask_slice = slice(0, 1)
|
||||
masks = masks[:, mask_slice, :, :]
|
||||
iou_pred = iou_pred[:, mask_slice]
|
||||
|
||||
# Prepare output
|
||||
return masks, iou_pred
|
||||
|
||||
def predict_masks(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Predicts masks. See 'forward' for more details."""
|
||||
# Concatenate output tokens
|
||||
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
|
||||
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
|
||||
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
|
||||
|
||||
# Expand per-image data in batch direction to be per-mask
|
||||
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
|
||||
src = src + dense_prompt_embeddings
|
||||
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
|
||||
b, c, h, w = src.shape
|
||||
|
||||
# Run the transformer
|
||||
hs, src = self.transformer(src, pos_src, tokens)
|
||||
iou_token_out = hs[:, 0, :]
|
||||
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
|
||||
|
||||
# Upscale mask embeddings and predict masks using the mask tokens
|
||||
src = src.transpose(1, 2).view(b, c, h, w)
|
||||
upscaled_embedding = self.output_upscaling(src)
|
||||
hyper_in_list: List[torch.Tensor] = []
|
||||
for i in range(self.num_mask_tokens):
|
||||
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
|
||||
hyper_in = torch.stack(hyper_in_list, dim=1)
|
||||
b, c, h, w = upscaled_embedding.shape
|
||||
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
|
||||
|
||||
# Generate mask quality predictions
|
||||
iou_pred = self.iou_prediction_head(iou_token_out)
|
||||
|
||||
return masks, iou_pred
|
||||
|
||||
|
||||
# Lightly adapted from
|
||||
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
|
||||
class MLP(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
input_dim: int,
|
||||
hidden_dim: int,
|
||||
output_dim: int,
|
||||
num_layers: int,
|
||||
sigmoid_output: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.num_layers = num_layers
|
||||
h = [hidden_dim] * (num_layers - 1)
|
||||
self.layers = nn.ModuleList(
|
||||
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
|
||||
)
|
||||
self.sigmoid_output = sigmoid_output
|
||||
|
||||
def forward(self, x):
|
||||
for i, layer in enumerate(self.layers):
|
||||
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
|
||||
if self.sigmoid_output:
|
||||
x = F.sigmoid(x)
|
||||
return x
|
||||
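# Hedged usage sketch (not part of the upstream file; dimensions are illustrative
# assumptions): with num_layers=3 the MLP above builds Linear(256->256),
# Linear(256->256), Linear(256->32), applying ReLU after every layer except the last.
import torch
mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
tokens = torch.randn(2, 256)   # e.g. two mask tokens
out = mlp(tokens)              # -> torch.Size([2, 32])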
@@ -0,0 +1,214 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch import nn
|
||||
|
||||
from typing import Any, Optional, Tuple, Type
|
||||
|
||||
from .common import LayerNorm2d
|
||||
|
||||
|
||||
class PromptEncoder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim: int,
|
||||
image_embedding_size: Tuple[int, int],
|
||||
input_image_size: Tuple[int, int],
|
||||
mask_in_chans: int,
|
||||
activation: Type[nn.Module] = nn.GELU,
|
||||
) -> None:
|
||||
"""
|
||||
Encodes prompts for input to SAM's mask decoder.
|
||||
|
||||
Arguments:
|
||||
embed_dim (int): The prompts' embedding dimension
|
||||
image_embedding_size (tuple(int, int)): The spatial size of the
|
||||
image embedding, as (H, W).
|
||||
input_image_size (tuple(int, int)): The padded size of the image as input
|
||||
to the image encoder, as (H, W).
|
||||
mask_in_chans (int): The number of hidden channels used for
|
||||
encoding input masks.
|
||||
activation (nn.Module): The activation to use when encoding
|
||||
input masks.
|
||||
"""
|
||||
super().__init__()
|
||||
self.embed_dim = embed_dim
|
||||
self.input_image_size = input_image_size
|
||||
self.image_embedding_size = image_embedding_size
|
||||
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
|
||||
|
||||
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
|
||||
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
|
||||
self.point_embeddings = nn.ModuleList(point_embeddings)
|
||||
self.not_a_point_embed = nn.Embedding(1, embed_dim)
|
||||
|
||||
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
|
||||
self.mask_downscaling = nn.Sequential(
|
||||
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
|
||||
LayerNorm2d(mask_in_chans // 4),
|
||||
activation(),
|
||||
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
|
||||
LayerNorm2d(mask_in_chans),
|
||||
activation(),
|
||||
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
|
||||
)
|
||||
self.no_mask_embed = nn.Embedding(1, embed_dim)
|
||||
|
||||
def get_dense_pe(self) -> torch.Tensor:
|
||||
"""
|
||||
Returns the positional encoding used to encode point prompts,
|
||||
applied to a dense set of points the shape of the image encoding.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: Positional encoding with shape
|
||||
1x(embed_dim)x(embedding_h)x(embedding_w)
|
||||
"""
|
||||
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
|
||||
|
||||
def _embed_points(
|
||||
self,
|
||||
points: torch.Tensor,
|
||||
labels: torch.Tensor,
|
||||
pad: bool,
|
||||
) -> torch.Tensor:
|
||||
"""Embeds point prompts."""
|
||||
points = points + 0.5 # Shift to center of pixel
|
||||
if pad:
|
||||
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
|
||||
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
|
||||
points = torch.cat([points, padding_point], dim=1)
|
||||
labels = torch.cat([labels, padding_label], dim=1)
|
||||
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
|
||||
point_embedding[labels == -1] = 0.0
|
||||
point_embedding[labels == -1] += self.not_a_point_embed.weight
|
||||
point_embedding[labels == 0] += self.point_embeddings[0].weight
|
||||
point_embedding[labels == 1] += self.point_embeddings[1].weight
|
||||
return point_embedding
|
||||
|
||||
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
|
||||
"""Embeds box prompts."""
|
||||
boxes = boxes + 0.5 # Shift to center of pixel
|
||||
coords = boxes.reshape(-1, 2, 2)
|
||||
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
|
||||
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
|
||||
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
|
||||
return corner_embedding
|
||||
|
||||
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
|
||||
"""Embeds mask inputs."""
|
||||
mask_embedding = self.mask_downscaling(masks)
|
||||
return mask_embedding
|
||||
|
||||
def _get_batch_size(
|
||||
self,
|
||||
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
|
||||
boxes: Optional[torch.Tensor],
|
||||
masks: Optional[torch.Tensor],
|
||||
) -> int:
|
||||
"""
|
||||
Gets the batch size of the output given the batch size of the input prompts.
|
||||
"""
|
||||
if points is not None:
|
||||
return points[0].shape[0]
|
||||
elif boxes is not None:
|
||||
return boxes.shape[0]
|
||||
elif masks is not None:
|
||||
return masks.shape[0]
|
||||
else:
|
||||
return 1
|
||||
|
||||
def _get_device(self) -> torch.device:
|
||||
return self.point_embeddings[0].weight.device
|
||||
|
||||
def forward(
|
||||
self,
|
||||
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
|
||||
boxes: Optional[torch.Tensor],
|
||||
masks: Optional[torch.Tensor],
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Embeds different types of prompts, returning both sparse and dense
|
||||
embeddings.
|
||||
|
||||
Arguments:
|
||||
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
|
||||
and labels to embed.
|
||||
boxes (torch.Tensor or none): boxes to embed
|
||||
masks (torch.Tensor or none): masks to embed
|
||||
|
||||
Returns:
|
||||
torch.Tensor: sparse embeddings for the points and boxes, with shape
|
||||
BxNx(embed_dim), where N is determined by the number of input points
|
||||
and boxes.
|
||||
torch.Tensor: dense embeddings for the masks, in the shape
|
||||
Bx(embed_dim)x(embed_H)x(embed_W)
|
||||
"""
|
||||
bs = self._get_batch_size(points, boxes, masks)
|
||||
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
|
||||
if points is not None:
|
||||
coords, labels = points
|
||||
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
|
||||
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
|
||||
if boxes is not None:
|
||||
box_embeddings = self._embed_boxes(boxes)
|
||||
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
|
||||
|
||||
if masks is not None:
|
||||
dense_embeddings = self._embed_masks(masks)
|
||||
else:
|
||||
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
|
||||
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
|
||||
)
|
||||
|
||||
return sparse_embeddings, dense_embeddings
|
||||
|
||||
|
||||
class PositionEmbeddingRandom(nn.Module):
|
||||
"""
|
||||
Positional encoding using random spatial frequencies.
|
||||
"""
|
||||
|
||||
def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
|
||||
super().__init__()
|
||||
if scale is None or scale <= 0.0:
|
||||
scale = 1.0
|
||||
self.register_buffer(
|
||||
"positional_encoding_gaussian_matrix",
|
||||
scale * torch.randn((2, num_pos_feats)),
|
||||
)
|
||||
|
||||
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
|
||||
"""Positionally encode points that are normalized to [0,1]."""
|
||||
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
|
||||
coords = 2 * coords - 1
|
||||
coords = coords @ self.positional_encoding_gaussian_matrix
|
||||
coords = 2 * np.pi * coords
|
||||
# outputs d_1 x ... x d_n x C shape
|
||||
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
|
||||
|
||||
def forward(self, size: Tuple[int, int]) -> torch.Tensor:
|
||||
"""Generate positional encoding for a grid of the specified size."""
|
||||
h, w = size
|
||||
device: Any = self.positional_encoding_gaussian_matrix.device
|
||||
grid = torch.ones((h, w), device=device, dtype=torch.float32)
|
||||
y_embed = grid.cumsum(dim=0) - 0.5
|
||||
x_embed = grid.cumsum(dim=1) - 0.5
|
||||
y_embed = y_embed / h
|
||||
x_embed = x_embed / w
|
||||
|
||||
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
|
||||
return pe.permute(2, 0, 1) # C x H x W
|
||||
|
||||
def forward_with_coords(
|
||||
self, coords_input: torch.Tensor, image_size: Tuple[int, int]
|
||||
) -> torch.Tensor:
|
||||
"""Positionally encode points that are not normalized to [0,1]."""
|
||||
coords = coords_input.clone()
|
||||
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
|
||||
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
|
||||
return self._pe_encoding(coords.to(torch.float)) # B x N x C
|
||||
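# Hedged usage sketch (not part of the upstream file; the sizes below are the usual
# SAM defaults and are assumptions here): a single foreground point produces sparse
# embeddings of shape 1x2x256 (the point plus a padding point, since no box is given)
# and dense embeddings of shape 1x256x64x64 from the learned no-mask embedding.
import torch
enc = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 512.0]]])   # 1 image, 1 point, (x, y) in the input frame
labels = torch.tensor([[1]])                # 1 = foreground
sparse, dense = enc(points=(coords, labels), boxes=None, masks=None)
# sparse.shape == (1, 2, 256); dense.shape == (1, 256, 64, 64)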
@@ -0,0 +1,174 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.nn import functional as F
|
||||
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
from .image_encoder import ImageEncoderViT
|
||||
from .mask_decoder import MaskDecoder
|
||||
from .prompt_encoder import PromptEncoder
|
||||
|
||||
|
||||
class Sam(nn.Module):
|
||||
mask_threshold: float = 0.0
|
||||
image_format: str = "RGB"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
image_encoder: ImageEncoderViT,
|
||||
prompt_encoder: PromptEncoder,
|
||||
mask_decoder: MaskDecoder,
|
||||
pixel_mean: List[float] = [123.675, 116.28, 103.53],
|
||||
pixel_std: List[float] = [58.395, 57.12, 57.375],
|
||||
) -> None:
|
||||
"""
|
||||
SAM predicts object masks from an image and input prompts.
|
||||
|
||||
Arguments:
|
||||
image_encoder (ImageEncoderViT): The backbone used to encode the
|
||||
image into image embeddings that allow for efficient mask prediction.
|
||||
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
|
||||
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
|
||||
and encoded prompts.
|
||||
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
|
||||
pixel_std (list(float)): Std values for normalizing pixels in the input image.
|
||||
"""
|
||||
super().__init__()
|
||||
self.image_encoder = image_encoder
|
||||
self.prompt_encoder = prompt_encoder
|
||||
self.mask_decoder = mask_decoder
|
||||
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
|
||||
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
|
||||
|
||||
@property
|
||||
def device(self) -> Any:
|
||||
return self.pixel_mean.device
|
||||
|
||||
@torch.no_grad()
|
||||
def forward(
|
||||
self,
|
||||
batched_input: List[Dict[str, Any]],
|
||||
multimask_output: bool,
|
||||
) -> List[Dict[str, torch.Tensor]]:
|
||||
"""
|
||||
Predicts masks end-to-end from provided images and prompts.
|
||||
If prompts are not known in advance, using SamPredictor is
|
||||
recommended over calling the model directly.
|
||||
|
||||
Arguments:
|
||||
batched_input (list(dict)): A list over input images, each a
|
||||
dictionary with the following keys. A prompt key can be
|
||||
excluded if it is not present.
|
||||
'image': The image as a torch tensor in 3xHxW format,
|
||||
already transformed for input to the model.
|
||||
'original_size': (tuple(int, int)) The original size of
|
||||
the image before transformation, as (H, W).
|
||||
'point_coords': (torch.Tensor) Batched point prompts for
|
||||
this image, with shape BxNx2. Already transformed to the
|
||||
input frame of the model.
|
||||
'point_labels': (torch.Tensor) Batched labels for point prompts,
|
||||
with shape BxN.
|
||||
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
|
||||
Already transformed to the input frame of the model.
|
||||
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
|
||||
in the form Bx1xHxW.
|
||||
multimask_output (bool): Whether the model should predict multiple
|
||||
disambiguating masks, or return a single mask.
|
||||
|
||||
Returns:
|
||||
(list(dict)): A list over input images, where each element is
|
||||
a dictionary with the following keys.
|
||||
'masks': (torch.Tensor) Batched binary mask predictions,
|
||||
with shape BxCxHxW, where B is the number of input prompts,
|
||||
C is determined by multimask_output, and (H, W) is the
|
||||
original size of the image.
|
||||
'iou_predictions': (torch.Tensor) The model's predictions
|
||||
of mask quality, in shape BxC.
|
||||
'low_res_logits': (torch.Tensor) Low resolution logits with
|
||||
shape BxCxHxW, where H=W=256. Can be passed as mask input
|
||||
to subsequent iterations of prediction.
|
||||
"""
|
||||
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
|
||||
image_embeddings = self.image_encoder(input_images)
|
||||
|
||||
outputs = []
|
||||
for image_record, curr_embedding in zip(batched_input, image_embeddings):
|
||||
if "point_coords" in image_record:
|
||||
points = (image_record["point_coords"], image_record["point_labels"])
|
||||
else:
|
||||
points = None
|
||||
sparse_embeddings, dense_embeddings = self.prompt_encoder(
|
||||
points=points,
|
||||
boxes=image_record.get("boxes", None),
|
||||
masks=image_record.get("mask_inputs", None),
|
||||
)
|
||||
low_res_masks, iou_predictions = self.mask_decoder(
|
||||
image_embeddings=curr_embedding.unsqueeze(0),
|
||||
image_pe=self.prompt_encoder.get_dense_pe(),
|
||||
sparse_prompt_embeddings=sparse_embeddings,
|
||||
dense_prompt_embeddings=dense_embeddings,
|
||||
multimask_output=multimask_output,
|
||||
)
|
||||
masks = self.postprocess_masks(
|
||||
low_res_masks,
|
||||
input_size=image_record["image"].shape[-2:],
|
||||
original_size=image_record["original_size"],
|
||||
)
|
||||
masks = masks > self.mask_threshold
|
||||
outputs.append(
|
||||
{
|
||||
"masks": masks,
|
||||
"iou_predictions": iou_predictions,
|
||||
"low_res_logits": low_res_masks,
|
||||
}
|
||||
)
|
||||
return outputs
|
||||
|
||||
def postprocess_masks(
|
||||
self,
|
||||
masks: torch.Tensor,
|
||||
input_size: Tuple[int, ...],
|
||||
original_size: Tuple[int, ...],
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Remove padding and upscale masks to the original image size.
|
||||
|
||||
Arguments:
|
||||
masks (torch.Tensor): Batched masks from the mask_decoder,
|
||||
in BxCxHxW format.
|
||||
input_size (tuple(int, int)): The size of the image input to the
|
||||
model, in (H, W) format. Used to remove padding.
|
||||
original_size (tuple(int, int)): The original size of the image
|
||||
before resizing for input to the model, in (H, W) format.
|
||||
|
||||
Returns:
|
||||
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
|
||||
is given by original_size.
|
||||
"""
|
||||
masks = F.interpolate(
|
||||
masks,
|
||||
(self.image_encoder.img_size, self.image_encoder.img_size),
|
||||
mode="bilinear",
|
||||
align_corners=False,
|
||||
)
|
||||
masks = masks[..., : input_size[0], : input_size[1]]
|
||||
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
|
||||
return masks
|
||||
|
||||
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Normalize pixel values and pad to a square input."""
|
||||
# Normalize colors
|
||||
x = (x - self.pixel_mean) / self.pixel_std
|
||||
|
||||
# Pad
|
||||
h, w = x.shape[-2:]
|
||||
padh = self.image_encoder.img_size - h
|
||||
padw = self.image_encoder.img_size - w
|
||||
x = F.pad(x, (0, padw, 0, padh))
|
||||
return x
|
||||
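# Hedged sketch of the batched_input format documented above (not part of the upstream
# file; `sam`, `image`, `orig_h`, `orig_w`, `point_coords`, and `point_labels` are
# assumed to exist: a built Sam model, a 3xHxW image tensor already resized to the
# model's input frame, its pre-resize size, and point prompts in the input frame).
batched_input = [
    {
        "image": image,                     # 3xHxW, model input frame
        "original_size": (orig_h, orig_w),  # size before resizing, (H, W)
        "point_coords": point_coords,       # BxNx2, model input frame
        "point_labels": point_labels,       # BxN, 1 = foreground, 0 = background
    }
]
outputs = sam(batched_input, multimask_output=True)
masks = outputs[0]["masks"]                 # BxCxHxW boolean masks at original size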
@@ -0,0 +1,240 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
from torch import Tensor, nn
|
||||
|
||||
import math
|
||||
from typing import Tuple, Type
|
||||
|
||||
from .common import MLPBlock
|
||||
|
||||
|
||||
class TwoWayTransformer(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
depth: int,
|
||||
embedding_dim: int,
|
||||
num_heads: int,
|
||||
mlp_dim: int,
|
||||
activation: Type[nn.Module] = nn.ReLU,
|
||||
attention_downsample_rate: int = 2,
|
||||
) -> None:
|
||||
"""
|
||||
A transformer decoder that attends to an input image using
|
||||
queries whose positional embedding is supplied.
|
||||
|
||||
Args:
|
||||
depth (int): number of layers in the transformer
|
||||
embedding_dim (int): the channel dimension for the input embeddings
|
||||
num_heads (int): the number of heads for multihead attention. Must
|
||||
divide embedding_dim
|
||||
mlp_dim (int): the channel dimension internal to the MLP block
|
||||
activation (nn.Module): the activation to use in the MLP block
|
||||
"""
|
||||
super().__init__()
|
||||
self.depth = depth
|
||||
self.embedding_dim = embedding_dim
|
||||
self.num_heads = num_heads
|
||||
self.mlp_dim = mlp_dim
|
||||
self.layers = nn.ModuleList()
|
||||
|
||||
for i in range(depth):
|
||||
self.layers.append(
|
||||
TwoWayAttentionBlock(
|
||||
embedding_dim=embedding_dim,
|
||||
num_heads=num_heads,
|
||||
mlp_dim=mlp_dim,
|
||||
activation=activation,
|
||||
attention_downsample_rate=attention_downsample_rate,
|
||||
skip_first_layer_pe=(i == 0),
|
||||
)
|
||||
)
|
||||
|
||||
self.final_attn_token_to_image = Attention(
|
||||
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
|
||||
)
|
||||
self.norm_final_attn = nn.LayerNorm(embedding_dim)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
image_embedding: Tensor,
|
||||
image_pe: Tensor,
|
||||
point_embedding: Tensor,
|
||||
) -> Tuple[Tensor, Tensor]:
|
||||
"""
|
||||
Args:
|
||||
image_embedding (torch.Tensor): image to attend to. Should be shape
|
||||
B x embedding_dim x h x w for any h and w.
|
||||
image_pe (torch.Tensor): the positional encoding to add to the image. Must
|
||||
have the same shape as image_embedding.
|
||||
point_embedding (torch.Tensor): the embedding to add to the query points.
|
||||
Must have shape B x N_points x embedding_dim for any N_points.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: the processed point_embedding
|
||||
torch.Tensor: the processed image_embedding
|
||||
"""
|
||||
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
|
||||
bs, c, h, w = image_embedding.shape
|
||||
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
|
||||
image_pe = image_pe.flatten(2).permute(0, 2, 1)
|
||||
|
||||
# Prepare queries
|
||||
queries = point_embedding
|
||||
keys = image_embedding
|
||||
|
||||
# Apply transformer blocks and final layernorm
|
||||
for layer in self.layers:
|
||||
queries, keys = layer(
|
||||
queries=queries,
|
||||
keys=keys,
|
||||
query_pe=point_embedding,
|
||||
key_pe=image_pe,
|
||||
)
|
||||
|
||||
# Apply the final attention layer from the points to the image
|
||||
q = queries + point_embedding
|
||||
k = keys + image_pe
|
||||
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
|
||||
queries = queries + attn_out
|
||||
queries = self.norm_final_attn(queries)
|
||||
|
||||
return queries, keys
|
||||
|
||||
|
||||
class TwoWayAttentionBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
embedding_dim: int,
|
||||
num_heads: int,
|
||||
mlp_dim: int = 2048,
|
||||
activation: Type[nn.Module] = nn.ReLU,
|
||||
attention_downsample_rate: int = 2,
|
||||
skip_first_layer_pe: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
A transformer block with four layers: (1) self-attention of sparse
|
||||
inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
|
||||
block on sparse inputs, and (4) cross attention of dense inputs to sparse
|
||||
inputs.
|
||||
|
||||
Arguments:
|
||||
embedding_dim (int): the channel dimension of the embeddings
|
||||
num_heads (int): the number of heads in the attention layers
|
||||
mlp_dim (int): the hidden dimension of the mlp block
|
||||
activation (nn.Module): the activation of the mlp block
|
||||
skip_first_layer_pe (bool): skip the PE on the first layer
|
||||
"""
|
||||
super().__init__()
|
||||
self.self_attn = Attention(embedding_dim, num_heads)
|
||||
self.norm1 = nn.LayerNorm(embedding_dim)
|
||||
|
||||
self.cross_attn_token_to_image = Attention(
|
||||
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
|
||||
)
|
||||
self.norm2 = nn.LayerNorm(embedding_dim)
|
||||
|
||||
self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
|
||||
self.norm3 = nn.LayerNorm(embedding_dim)
|
||||
|
||||
self.norm4 = nn.LayerNorm(embedding_dim)
|
||||
self.cross_attn_image_to_token = Attention(
|
||||
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
|
||||
)
|
||||
|
||||
self.skip_first_layer_pe = skip_first_layer_pe
|
||||
|
||||
def forward(
|
||||
self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
|
||||
) -> Tuple[Tensor, Tensor]:
|
||||
# Self attention block
|
||||
if self.skip_first_layer_pe:
|
||||
queries = self.self_attn(q=queries, k=queries, v=queries)
|
||||
else:
|
||||
q = queries + query_pe
|
||||
attn_out = self.self_attn(q=q, k=q, v=queries)
|
||||
queries = queries + attn_out
|
||||
queries = self.norm1(queries)
|
||||
|
||||
# Cross attention block, tokens attending to image embedding
|
||||
q = queries + query_pe
|
||||
k = keys + key_pe
|
||||
attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
|
||||
queries = queries + attn_out
|
||||
queries = self.norm2(queries)
|
||||
|
||||
# MLP block
|
||||
mlp_out = self.mlp(queries)
|
||||
queries = queries + mlp_out
|
||||
queries = self.norm3(queries)
|
||||
|
||||
# Cross attention block, image embedding attending to tokens
|
||||
q = queries + query_pe
|
||||
k = keys + key_pe
|
||||
attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
|
||||
keys = keys + attn_out
|
||||
keys = self.norm4(keys)
|
||||
|
||||
return queries, keys
|
||||
|
||||
|
||||
class Attention(nn.Module):
|
||||
"""
|
||||
An attention layer that allows for downscaling the size of the embedding
|
||||
after projection to queries, keys, and values.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
embedding_dim: int,
|
||||
num_heads: int,
|
||||
downsample_rate: int = 1,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.embedding_dim = embedding_dim
|
||||
self.internal_dim = embedding_dim // downsample_rate
|
||||
self.num_heads = num_heads
|
||||
assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
|
||||
|
||||
self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
|
||||
self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
|
||||
self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
|
||||
self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
|
||||
|
||||
def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
|
||||
b, n, c = x.shape
|
||||
x = x.reshape(b, n, num_heads, c // num_heads)
|
||||
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
|
||||
|
||||
def _recombine_heads(self, x: Tensor) -> Tensor:
|
||||
b, n_heads, n_tokens, c_per_head = x.shape
|
||||
x = x.transpose(1, 2)
|
||||
return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
|
||||
|
||||
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
|
||||
# Input projections
|
||||
q = self.q_proj(q)
|
||||
k = self.k_proj(k)
|
||||
v = self.v_proj(v)
|
||||
|
||||
# Separate into heads
|
||||
q = self._separate_heads(q, self.num_heads)
|
||||
k = self._separate_heads(k, self.num_heads)
|
||||
v = self._separate_heads(v, self.num_heads)
|
||||
|
||||
# Attention
|
||||
_, _, _, c_per_head = q.shape
|
||||
attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
|
||||
attn = attn / math.sqrt(c_per_head)
|
||||
attn = torch.softmax(attn, dim=-1)
|
||||
|
||||
# Get output
|
||||
out = attn @ v
|
||||
out = self._recombine_heads(out)
|
||||
out = self.out_proj(out)
|
||||
|
||||
return out
|
||||
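# Hedged usage sketch (not part of the upstream file; sizes are illustrative): with
# downsample_rate=2, Attention projects 256-dim tokens into a 128-dim internal space,
# splits it across 8 heads, and projects the attention output back to 256 dims.
import torch
attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
q = torch.randn(1, 5, 256)      # 5 query tokens
k = torch.randn(1, 4096, 256)   # 64*64 image tokens
out = attn(q=q, k=k, v=k)       # -> shape (1, 5, 256)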
@@ -0,0 +1,269 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from segment_anything.modeling import Sam
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from .utils.transforms import ResizeLongestSide
|
||||
|
||||
|
||||
class SamPredictor:
|
||||
def __init__(
|
||||
self,
|
||||
sam_model: Sam,
|
||||
) -> None:
|
||||
"""
|
||||
Uses SAM to calculate the image embedding for an image, and then
|
||||
allows repeated, efficient mask prediction given prompts.
|
||||
|
||||
Arguments:
|
||||
sam_model (Sam): The model to use for mask prediction.
|
||||
"""
|
||||
super().__init__()
|
||||
self.model = sam_model
|
||||
self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
|
||||
self.reset_image()
|
||||
|
||||
def set_image(
|
||||
self,
|
||||
image: np.ndarray,
|
||||
image_format: str = "RGB",
|
||||
) -> None:
|
||||
"""
|
||||
Calculates the image embeddings for the provided image, allowing
|
||||
masks to be predicted with the 'predict' method.
|
||||
|
||||
Arguments:
|
||||
image (np.ndarray): The image for calculating masks. Expects an
|
||||
image in HWC uint8 format, with pixel values in [0, 255].
|
||||
image_format (str): The color format of the image, in ['RGB', 'BGR'].
|
||||
"""
|
||||
assert image_format in [
|
||||
"RGB",
|
||||
"BGR",
|
||||
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
|
||||
if image_format != self.model.image_format:
|
||||
image = image[..., ::-1]
|
||||
|
||||
# Transform the image to the form expected by the model
|
||||
input_image = self.transform.apply_image(image)
|
||||
input_image_torch = torch.as_tensor(input_image, device=self.device)
|
||||
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
|
||||
|
||||
self.set_torch_image(input_image_torch, image.shape[:2])
|
||||
|
||||
@torch.no_grad()
|
||||
def set_torch_image(
|
||||
self,
|
||||
transformed_image: torch.Tensor,
|
||||
original_image_size: Tuple[int, ...],
|
||||
) -> None:
|
||||
"""
|
||||
Calculates the image embeddings for the provided image, allowing
|
||||
masks to be predicted with the 'predict' method. Expects the input
|
||||
image to be already transformed to the format expected by the model.
|
||||
|
||||
Arguments:
|
||||
transformed_image (torch.Tensor): The input image, with shape
|
||||
1x3xHxW, which has been transformed with ResizeLongestSide.
|
||||
original_image_size (tuple(int, int)): The size of the image
|
||||
before transformation, in (H, W) format.
|
||||
"""
|
||||
assert (
|
||||
len(transformed_image.shape) == 4
|
||||
and transformed_image.shape[1] == 3
|
||||
and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
|
||||
), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
|
||||
self.reset_image()
|
||||
|
||||
self.original_size = original_image_size
|
||||
self.input_size = tuple(transformed_image.shape[-2:])
|
||||
input_image = self.model.preprocess(transformed_image)
|
||||
self.features = self.model.image_encoder(input_image)
|
||||
self.is_image_set = True
|
||||
|
||||
def predict(
|
||||
self,
|
||||
point_coords: Optional[np.ndarray] = None,
|
||||
point_labels: Optional[np.ndarray] = None,
|
||||
box: Optional[np.ndarray] = None,
|
||||
mask_input: Optional[np.ndarray] = None,
|
||||
multimask_output: bool = True,
|
||||
return_logits: bool = False,
|
||||
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
|
||||
"""
|
||||
Predict masks for the given input prompts, using the currently set image.
|
||||
|
||||
Arguments:
|
||||
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
|
||||
model. Each point is in (X,Y) in pixels.
|
||||
point_labels (np.ndarray or None): A length N array of labels for the
|
||||
point prompts. 1 indicates a foreground point and 0 indicates a
|
||||
background point.
|
||||
box (np.ndarray or None): A length 4 array giving a box prompt to the
|
||||
model, in XYXY format.
|
||||
mask_input (np.ndarray): A low resolution mask input to the model, typically
|
||||
coming from a previous prediction iteration. Has form 1xHxW, where
|
||||
for SAM, H=W=256.
|
||||
multimask_output (bool): If true, the model will return three masks.
|
||||
For ambiguous input prompts (such as a single click), this will often
|
||||
produce better masks than a single prediction. If only a single
|
||||
mask is needed, the model's predicted quality score can be used
|
||||
to select the best mask. For non-ambiguous prompts, such as multiple
|
||||
input prompts, multimask_output=False can give better results.
|
||||
return_logits (bool): If true, returns un-thresholded masks logits
|
||||
instead of a binary mask.
|
||||
|
||||
Returns:
|
||||
(np.ndarray): The output masks in CxHxW format, where C is the
|
||||
number of masks, and (H, W) is the original image size.
|
||||
(np.ndarray): An array of length C containing the model's
|
||||
predictions for the quality of each mask.
|
||||
(np.ndarray): An array of shape CxHxW, where C is the number
|
||||
of masks and H=W=256. These low resolution logits can be passed to
|
||||
a subsequent iteration as mask input.
|
||||
"""
|
||||
if not self.is_image_set:
|
||||
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
|
||||
|
||||
# Transform input prompts
|
||||
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
|
||||
if point_coords is not None:
|
||||
assert (
|
||||
point_labels is not None
|
||||
), "point_labels must be supplied if point_coords is supplied."
|
||||
point_coords = self.transform.apply_coords(point_coords, self.original_size)
|
||||
coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
|
||||
labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
|
||||
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
|
||||
if box is not None:
|
||||
box = self.transform.apply_boxes(box, self.original_size)
|
||||
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
|
||||
box_torch = box_torch[None, :]
|
||||
if mask_input is not None:
|
||||
mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
|
||||
mask_input_torch = mask_input_torch[None, :, :, :]
|
||||
|
||||
masks, iou_predictions, low_res_masks = self.predict_torch(
|
||||
coords_torch,
|
||||
labels_torch,
|
||||
box_torch,
|
||||
mask_input_torch,
|
||||
multimask_output,
|
||||
return_logits=return_logits,
|
||||
)
|
||||
|
||||
masks_np = masks[0].detach().cpu().numpy()
|
||||
iou_predictions_np = iou_predictions[0].detach().cpu().numpy()
|
||||
low_res_masks_np = low_res_masks[0].detach().cpu().numpy()
|
||||
return masks_np, iou_predictions_np, low_res_masks_np
|
||||
|
||||
@torch.no_grad()
|
||||
def predict_torch(
|
||||
self,
|
||||
point_coords: Optional[torch.Tensor],
|
||||
point_labels: Optional[torch.Tensor],
|
||||
boxes: Optional[torch.Tensor] = None,
|
||||
mask_input: Optional[torch.Tensor] = None,
|
||||
multimask_output: bool = True,
|
||||
return_logits: bool = False,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Predict masks for the given input prompts, using the currently set image.
|
||||
Input prompts are batched torch tensors and are expected to already be
|
||||
transformed to the input frame using ResizeLongestSide.
|
||||
|
||||
Arguments:
|
||||
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
|
||||
model. Each point is in (X,Y) in pixels.
|
||||
point_labels (torch.Tensor or None): A BxN array of labels for the
|
||||
point prompts. 1 indicates a foreground point and 0 indicates a
|
||||
background point.
|
||||
boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
|
||||
model, in XYXY format.
|
||||
mask_input (torch.Tensor or None): A low resolution mask input to the model, typically
|
||||
coming from a previous prediction iteration. Has form Bx1xHxW, where
|
||||
for SAM, H=W=256. Masks returned by a previous iteration of the
|
||||
predict method do not need further transformation.
|
||||
multimask_output (bool): If true, the model will return three masks.
|
||||
For ambiguous input prompts (such as a single click), this will often
|
||||
produce better masks than a single prediction. If only a single
|
||||
mask is needed, the model's predicted quality score can be used
|
||||
to select the best mask. For non-ambiguous prompts, such as multiple
|
||||
input prompts, multimask_output=False can give better results.
|
||||
return_logits (bool): If true, returns un-thresholded masks logits
|
||||
instead of a binary mask.
|
||||
|
||||
Returns:
|
||||
(torch.Tensor): The output masks in BxCxHxW format, where C is the
|
||||
number of masks, and (H, W) is the original image size.
|
||||
(torch.Tensor): An array of shape BxC containing the model's
|
||||
predictions for the quality of each mask.
|
||||
(torch.Tensor): An array of shape BxCxHxW, where C is the number
|
||||
of masks and H=W=256. These low res logits can be passed to
|
||||
a subsequent iteration as mask input.
|
||||
"""
|
||||
if not self.is_image_set:
|
||||
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
|
||||
|
||||
if point_coords is not None:
|
||||
points = (point_coords, point_labels)
|
||||
else:
|
||||
points = None
|
||||
|
||||
# Embed prompts
|
||||
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
|
||||
points=points,
|
||||
boxes=boxes,
|
||||
masks=mask_input,
|
||||
)
|
||||
|
||||
# Predict masks
|
||||
low_res_masks, iou_predictions = self.model.mask_decoder(
|
||||
image_embeddings=self.features,
|
||||
image_pe=self.model.prompt_encoder.get_dense_pe(),
|
||||
sparse_prompt_embeddings=sparse_embeddings,
|
||||
dense_prompt_embeddings=dense_embeddings,
|
||||
multimask_output=multimask_output,
|
||||
)
|
||||
|
||||
# Upscale the masks to the original image resolution
|
||||
masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
|
||||
|
||||
if not return_logits:
|
||||
masks = masks > self.model.mask_threshold
|
||||
|
||||
return masks, iou_predictions, low_res_masks
|
||||
|
||||
def get_image_embedding(self) -> torch.Tensor:
|
||||
"""
|
||||
Returns the image embeddings for the currently set image, with
|
||||
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
|
||||
the embedding spatial dimension of SAM (typically C=256, H=W=64).
|
||||
"""
|
||||
if not self.is_image_set:
|
||||
raise RuntimeError(
|
||||
"An image must be set with .set_image(...) to generate an embedding."
|
||||
)
|
||||
assert self.features is not None, "Features must exist if an image has been set."
|
||||
return self.features
|
||||
|
||||
@property
|
||||
def device(self) -> torch.device:
|
||||
return self.model.device
|
||||
|
||||
def reset_image(self) -> None:
|
||||
"""Resets the currently set image."""
|
||||
self.is_image_set = False
|
||||
self.features = None
|
||||
self.orig_h = None
|
||||
self.orig_w = None
|
||||
self.input_h = None
|
||||
self.input_w = None
|
||||
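# Hedged usage sketch (not part of the upstream file; `sam` is an assumed, already-built
# Sam model and `image` an HxWx3 uint8 RGB numpy array): the embedding is computed once
# in set_image, after which predict can be called repeatedly with different prompts.
import numpy as np
predictor = SamPredictor(sam)
predictor.set_image(image)
masks, scores, low_res = predictor.predict(
    point_coords=np.array([[500, 375]]),   # one (x, y) click in pixels
    point_labels=np.array([1]),            # 1 = foreground
    multimask_output=True,
)
best = masks[np.argmax(scores)]            # keep the highest-quality of the three masks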
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
@@ -0,0 +1,346 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
import math
|
||||
from copy import deepcopy
|
||||
from itertools import product
|
||||
from typing import Any, Dict, Generator, ItemsView, List, Tuple
|
||||
|
||||
|
||||
class MaskData:
|
||||
"""
|
||||
A structure for storing masks and their related data in batched format.
|
||||
Implements basic filtering and concatenation.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs) -> None:
|
||||
for v in kwargs.values():
|
||||
assert isinstance(
|
||||
v, (list, np.ndarray, torch.Tensor)
|
||||
), "MaskData only supports list, numpy arrays, and torch tensors."
|
||||
self._stats = dict(**kwargs)
|
||||
|
||||
def __setitem__(self, key: str, item: Any) -> None:
|
||||
assert isinstance(
|
||||
item, (list, np.ndarray, torch.Tensor)
|
||||
), "MaskData only supports list, numpy arrays, and torch tensors."
|
||||
self._stats[key] = item
|
||||
|
||||
def __delitem__(self, key: str) -> None:
|
||||
del self._stats[key]
|
||||
|
||||
def __getitem__(self, key: str) -> Any:
|
||||
return self._stats[key]
|
||||
|
||||
def items(self) -> ItemsView[str, Any]:
|
||||
return self._stats.items()
|
||||
|
||||
def filter(self, keep: torch.Tensor) -> None:
|
||||
for k, v in self._stats.items():
|
||||
if v is None:
|
||||
self._stats[k] = None
|
||||
elif isinstance(v, torch.Tensor):
|
||||
self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
|
||||
elif isinstance(v, np.ndarray):
|
||||
self._stats[k] = v[keep.detach().cpu().numpy()]
|
||||
elif isinstance(v, list) and keep.dtype == torch.bool:
|
||||
self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
|
||||
elif isinstance(v, list):
|
||||
self._stats[k] = [v[i] for i in keep]
|
||||
else:
|
||||
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
|
||||
|
||||
def cat(self, new_stats: "MaskData") -> None:
|
||||
for k, v in new_stats.items():
|
||||
if k not in self._stats or self._stats[k] is None:
|
||||
self._stats[k] = deepcopy(v)
|
||||
elif isinstance(v, torch.Tensor):
|
||||
self._stats[k] = torch.cat([self._stats[k], v], dim=0)
|
||||
elif isinstance(v, np.ndarray):
|
||||
self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
|
||||
elif isinstance(v, list):
|
||||
self._stats[k] = self._stats[k] + deepcopy(v)
|
||||
else:
|
||||
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
|
||||
|
||||
def to_numpy(self) -> None:
|
||||
for k, v in self._stats.items():
|
||||
if isinstance(v, torch.Tensor):
|
||||
self._stats[k] = v.detach().cpu().numpy()
|
||||
|
||||
|
||||
def is_box_near_crop_edge(
|
||||
boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
|
||||
) -> torch.Tensor:
|
||||
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
|
||||
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
|
||||
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
|
||||
boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
|
||||
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
|
||||
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
|
||||
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
|
||||
return torch.any(near_crop_edge, dim=1)
|
||||
|
||||
|
||||
def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
|
||||
box_xywh = deepcopy(box_xyxy)
|
||||
box_xywh[2] = box_xywh[2] - box_xywh[0]
|
||||
box_xywh[3] = box_xywh[3] - box_xywh[1]
|
||||
return box_xywh
|
||||
|
||||
|
||||
def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
|
||||
assert len(args) > 0 and all(
|
||||
len(a) == len(args[0]) for a in args
|
||||
), "Batched iteration must have inputs of all the same size."
|
||||
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
|
||||
for b in range(n_batches):
|
||||
yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
|
||||
|
||||
|
||||
def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Encodes masks to an uncompressed RLE, in the format expected by
|
||||
pycoco tools.
|
||||
"""
|
||||
# Put in fortran order and flatten h,w
|
||||
b, h, w = tensor.shape
|
||||
tensor = tensor.permute(0, 2, 1).flatten(1)
|
||||
|
||||
# Compute change indices
|
||||
diff = tensor[:, 1:] ^ tensor[:, :-1]
|
||||
change_indices = diff.nonzero()
|
||||
|
||||
# Encode run length
|
||||
out = []
|
||||
for i in range(b):
|
||||
cur_idxs = change_indices[change_indices[:, 0] == i, 1]
|
||||
cur_idxs = torch.cat(
|
||||
[
|
||||
torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
|
||||
cur_idxs + 1,
|
||||
torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
|
||||
]
|
||||
)
|
||||
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
|
||||
counts = [] if tensor[i, 0] == 0 else [0]
|
||||
counts.extend(btw_idxs.detach().cpu().tolist())
|
||||
out.append({"size": [h, w], "counts": counts})
|
||||
return out
|
||||
|
||||
|
||||
def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
|
||||
"""Compute a binary mask from an uncompressed RLE."""
|
||||
h, w = rle["size"]
|
||||
mask = np.empty(h * w, dtype=bool)
|
||||
idx = 0
|
||||
parity = False
|
||||
for count in rle["counts"]:
|
||||
mask[idx : idx + count] = parity
|
||||
idx += count
|
||||
parity ^= True
|
||||
mask = mask.reshape(w, h)
|
||||
return mask.transpose() # Put in C order
|
||||
|
||||
|
||||
def area_from_rle(rle: Dict[str, Any]) -> int:
|
||||
return sum(rle["counts"][1::2])
|
||||
|
||||
|
||||
def calculate_stability_score(
|
||||
masks: torch.Tensor, mask_threshold: float, threshold_offset: float
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Computes the stability score for a batch of masks. The stability
|
||||
score is the IoU between the binary masks obtained by thresholding
|
||||
the predicted mask logits at high and low values.
|
||||
"""
|
||||
# One mask is always contained inside the other.
|
||||
# Save memory by preventing unnecessary cast to torch.int64
|
||||
intersections = (
|
||||
(masks > (mask_threshold + threshold_offset))
|
||||
.sum(-1, dtype=torch.int16)
|
||||
.sum(-1, dtype=torch.int32)
|
||||
)
|
||||
unions = (
|
||||
(masks > (mask_threshold - threshold_offset))
|
||||
.sum(-1, dtype=torch.int16)
|
||||
.sum(-1, dtype=torch.int32)
|
||||
)
|
||||
return intersections / unions
|
||||
|
||||
|
||||
def build_point_grid(n_per_side: int) -> np.ndarray:
|
||||
"""Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
|
||||
offset = 1 / (2 * n_per_side)
|
||||
points_one_side = np.linspace(offset, 1 - offset, n_per_side)
|
||||
points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
|
||||
points_y = np.tile(points_one_side[:, None], (1, n_per_side))
|
||||
points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
|
||||
return points
|
||||
|
||||
|
||||
def build_all_layer_point_grids(
|
||||
n_per_side: int, n_layers: int, scale_per_layer: int
|
||||
) -> List[np.ndarray]:
|
||||
"""Generates point grids for all crop layers."""
|
||||
points_by_layer = []
|
||||
for i in range(n_layers + 1):
|
||||
n_points = int(n_per_side / (scale_per_layer**i))
|
||||
points_by_layer.append(build_point_grid(n_points))
|
||||
return points_by_layer
|
||||
|
||||
|
||||
def generate_crop_boxes(
|
||||
im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
|
||||
) -> Tuple[List[List[int]], List[int]]:
|
||||
"""
|
||||
Generates a list of crop boxes of different sizes. Each layer
|
||||
has (2**i)**2 boxes for the ith layer.
|
||||
"""
|
||||
crop_boxes, layer_idxs = [], []
|
||||
im_h, im_w = im_size
|
||||
short_side = min(im_h, im_w)
|
||||
|
||||
# Original image
|
||||
crop_boxes.append([0, 0, im_w, im_h])
|
||||
layer_idxs.append(0)
|
||||
|
||||
def crop_len(orig_len, n_crops, overlap):
|
||||
return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
|
||||
|
||||
for i_layer in range(n_layers):
|
||||
n_crops_per_side = 2 ** (i_layer + 1)
|
||||
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
|
||||
|
||||
crop_w = crop_len(im_w, n_crops_per_side, overlap)
|
||||
crop_h = crop_len(im_h, n_crops_per_side, overlap)
|
||||
|
||||
crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
|
||||
crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
|
||||
|
||||
# Crops in XYXY format
|
||||
for x0, y0 in product(crop_box_x0, crop_box_y0):
|
||||
box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
|
||||
crop_boxes.append(box)
|
||||
layer_idxs.append(i_layer + 1)
|
||||
|
||||
return crop_boxes, layer_idxs
|
||||
|
||||
|
||||
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
|
||||
x0, y0, _, _ = crop_box
|
||||
offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
|
||||
# Check if boxes has a channel dimension
|
||||
if len(boxes.shape) == 3:
|
||||
offset = offset.unsqueeze(1)
|
||||
return boxes + offset
|
||||
|
||||
|
||||
def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
|
||||
x0, y0, _, _ = crop_box
|
||||
offset = torch.tensor([[x0, y0]], device=points.device)
|
||||
# Check if points has a channel dimension
|
||||
if len(points.shape) == 3:
|
||||
offset = offset.unsqueeze(1)
|
||||
return points + offset
|
||||
|
||||
|
||||
def uncrop_masks(
|
||||
masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
|
||||
) -> torch.Tensor:
|
||||
x0, y0, x1, y1 = crop_box
|
||||
if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
|
||||
return masks
|
||||
# Coordinate transform masks
|
||||
pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
|
||||
pad = (x0, pad_x - x0, y0, pad_y - y0)
|
||||
return torch.nn.functional.pad(masks, pad, value=0)
|
||||
|
||||
|
||||
def remove_small_regions(
|
||||
mask: np.ndarray, area_thresh: float, mode: str
|
||||
) -> Tuple[np.ndarray, bool]:
|
||||
"""
|
||||
Removes small disconnected regions and holes in a mask. Returns the
|
||||
mask and an indicator of whether the mask has been modified.
|
||||
"""
|
||||
import cv2 # type: ignore
|
||||
|
||||
assert mode in ["holes", "islands"]
|
||||
correct_holes = mode == "holes"
|
||||
working_mask = (correct_holes ^ mask).astype(np.uint8)
|
||||
n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
|
||||
sizes = stats[:, -1][1:] # Row 0 is background label
|
||||
small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
|
||||
if len(small_regions) == 0:
|
||||
return mask, False
|
||||
fill_labels = [0] + small_regions
|
||||
if not correct_holes:
|
||||
fill_labels = [i for i in range(n_labels) if i not in fill_labels]
|
||||
# If every region is below threshold, keep largest
|
||||
if len(fill_labels) == 0:
|
||||
fill_labels = [int(np.argmax(sizes)) + 1]
|
||||
mask = np.isin(regions, fill_labels)
|
||||
return mask, True
|
||||
|
||||
|
||||
def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
|
||||
from pycocotools import mask as mask_utils # type: ignore
|
||||
|
||||
h, w = uncompressed_rle["size"]
|
||||
rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
|
||||
rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
|
||||
return rle
|
||||
|
||||
|
||||
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
|
||||
an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
|
||||
"""
|
||||
# torch.max below raises an error on empty inputs, just skip in this case
|
||||
if torch.numel(masks) == 0:
|
||||
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
|
||||
|
||||
# Normalize shape to CxHxW
|
||||
shape = masks.shape
|
||||
h, w = shape[-2:]
|
||||
if len(shape) > 2:
|
||||
masks = masks.flatten(0, -3)
|
||||
else:
|
||||
masks = masks.unsqueeze(0)
|
||||
|
||||
# Get top and bottom edges
|
||||
in_height, _ = torch.max(masks, dim=-1)
|
||||
in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
|
||||
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
|
||||
in_height_coords = in_height_coords + h * (~in_height)
|
||||
top_edges, _ = torch.min(in_height_coords, dim=-1)
|
||||
|
||||
# Get left and right edges
|
||||
in_width, _ = torch.max(masks, dim=-2)
|
||||
in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
|
||||
right_edges, _ = torch.max(in_width_coords, dim=-1)
|
||||
in_width_coords = in_width_coords + w * (~in_width)
|
||||
left_edges, _ = torch.min(in_width_coords, dim=-1)
|
||||
|
||||
# If the mask is empty the right edge will be to the left of the left edge.
|
||||
# Replace these boxes with [0, 0, 0, 0]
|
||||
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
|
||||
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
|
||||
out = out * (~empty_filter).unsqueeze(-1)
|
||||
|
||||
# Return to original shape
|
||||
if len(shape) > 2:
|
||||
out = out.reshape(*shape[:-2], 4)
|
||||
else:
|
||||
out = out[0]
|
||||
|
||||
return out
|
||||
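# Hedged sketch (not part of the upstream file): mask_to_rle_pytorch and rle_to_mask
# round-trip a binary mask through the uncompressed, column-major RLE used above.
import torch
mask = torch.zeros(1, 4, 4, dtype=torch.bool)
mask[0, 1:3, 1:3] = True
rle = mask_to_rle_pytorch(mask)[0]          # {'size': [4, 4], 'counts': [5, 2, 2, 2, 5]}
restored = rle_to_mask(rle)                 # np.ndarray of shape (4, 4)
assert (torch.from_numpy(restored) == mask[0]).all()
assert area_from_rle(rle) == 4              # total foreground area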
@@ -0,0 +1,144 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torch.nn import functional as F
|
||||
|
||||
from typing import Tuple
|
||||
|
||||
from ..modeling import Sam
|
||||
from .amg import calculate_stability_score
|
||||
|
||||
|
||||
class SamOnnxModel(nn.Module):
|
||||
"""
|
||||
This model should not be called directly, but is used in ONNX export.
|
||||
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
|
||||
with some functions modified to enable model tracing. Also supports extra
|
||||
options controlling what information is returned. See the ONNX export script for details.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: Sam,
|
||||
return_single_mask: bool,
|
||||
use_stability_score: bool = False,
|
||||
return_extra_metrics: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.mask_decoder = model.mask_decoder
|
||||
self.model = model
|
||||
self.img_size = model.image_encoder.img_size
|
||||
self.return_single_mask = return_single_mask
|
||||
self.use_stability_score = use_stability_score
|
||||
self.stability_score_offset = 1.0
|
||||
self.return_extra_metrics = return_extra_metrics
|
||||
|
||||
@staticmethod
|
||||
def resize_longest_image_size(
|
||||
input_image_size: torch.Tensor, longest_side: int
|
||||
) -> torch.Tensor:
|
||||
input_image_size = input_image_size.to(torch.float32)
|
||||
scale = longest_side / torch.max(input_image_size)
|
||||
transformed_size = scale * input_image_size
|
||||
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
|
||||
return transformed_size
|
||||
|
||||
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
|
||||
point_coords = point_coords + 0.5
|
||||
point_coords = point_coords / self.img_size
|
||||
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
|
||||
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
|
||||
|
||||
point_embedding = point_embedding * (point_labels != -1)
|
||||
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
|
||||
point_labels == -1
|
||||
)
|
||||
|
||||
for i in range(self.model.prompt_encoder.num_point_embeddings):
|
||||
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
|
||||
i
|
||||
].weight * (point_labels == i)
|
||||
|
||||
return point_embedding
|
||||
|
||||
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
|
||||
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
|
||||
mask_embedding = mask_embedding + (
|
||||
1 - has_mask_input
|
||||
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
|
||||
return mask_embedding
|
||||
|
||||
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
|
||||
masks = F.interpolate(
|
||||
masks,
|
||||
size=(self.img_size, self.img_size),
|
||||
mode="bilinear",
|
||||
align_corners=False,
|
||||
)
|
||||
|
||||
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64)
|
||||
masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
|
||||
|
||||
orig_im_size = orig_im_size.to(torch.int64)
|
||||
h, w = orig_im_size[0], orig_im_size[1]
|
||||
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
|
||||
return masks
|
||||
|
||||
def select_masks(
|
||||
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
# Determine if we should return the multiclick mask or not from the number of points.
|
||||
# The reweighting is used to avoid control flow.
|
||||
score_reweight = torch.tensor(
|
||||
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
|
||||
).to(iou_preds.device)
|
||||
score = iou_preds + (num_points - 2.5) * score_reweight
|
||||
best_idx = torch.argmax(score, dim=1)
|
||||
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
|
||||
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
|
||||
|
||||
return masks, iou_preds
|
||||
|
||||
@torch.no_grad()
|
||||
def forward(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
point_coords: torch.Tensor,
|
||||
point_labels: torch.Tensor,
|
||||
mask_input: torch.Tensor,
|
||||
has_mask_input: torch.Tensor,
|
||||
orig_im_size: torch.Tensor,
|
||||
):
|
||||
sparse_embedding = self._embed_points(point_coords, point_labels)
|
||||
dense_embedding = self._embed_masks(mask_input, has_mask_input)
|
||||
|
||||
masks, scores = self.model.mask_decoder.predict_masks(
|
||||
image_embeddings=image_embeddings,
|
||||
image_pe=self.model.prompt_encoder.get_dense_pe(),
|
||||
sparse_prompt_embeddings=sparse_embedding,
|
||||
dense_prompt_embeddings=dense_embedding,
|
||||
)
|
||||
|
||||
if self.use_stability_score:
|
||||
scores = calculate_stability_score(
|
||||
masks, self.model.mask_threshold, self.stability_score_offset
|
||||
)
|
||||
|
||||
if self.return_single_mask:
|
||||
masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
|
||||
|
||||
upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
|
||||
|
||||
if self.return_extra_metrics:
|
||||
stability_scores = calculate_stability_score(
|
||||
upscaled_masks, self.model.mask_threshold, self.stability_score_offset
|
||||
)
|
||||
areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
|
||||
return upscaled_masks, scores, stability_scores, areas, masks
|
||||
|
||||
return upscaled_masks, scores, masks
|
||||
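# Hedged sketch (not part of the upstream file): SamOnnxModel wraps an already-built
# Sam model (assumed to exist as `sam`) so that everything after the image encoder can
# be traced, e.g. with torch.onnx.export. Its static resize helper is pure tensor math:
import torch
onnx_model = SamOnnxModel(model=sam, return_single_mask=True)
print(SamOnnxModel.resize_longest_image_size(torch.tensor([600, 900]), longest_side=1024))
# -> tensor([ 683, 1024])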
@@ -0,0 +1,102 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image  # type: ignore

from copy import deepcopy
from typing import Tuple


class ResizeLongestSide:
    """
    Resizes images to the longest side 'target_length', as well as provides
    methods for resizing coordinates and boxes. Provides methods for
    transforming both numpy array and batched torch tensors.
    """

    def __init__(self, target_length: int) -> None:
        self.target_length = target_length

    def apply_image(self, image: np.ndarray) -> np.ndarray:
        """
        Expects a numpy array with shape HxWxC in uint8 format.
        """
        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
        return np.array(resize(to_pil_image(image), target_size))

    def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        coords = deepcopy(coords).astype(float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
        """
        Expects a numpy array shape Bx4. Requires the original image size
        in (H, W) format.
        """
        boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
        """
        Expects batched images with shape BxCxHxW and float format. This
        transformation may not exactly match apply_image. apply_image is
        the transformation expected by the model.
        """
        # Expects an image in BCHW format. May not exactly match apply_image.
        target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
        return F.interpolate(
            image, target_size, mode="bilinear", align_corners=False, antialias=True
        )

    def apply_coords_torch(
        self, coords: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with length 2 in the last dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        coords = deepcopy(coords).to(torch.float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes_torch(
        self, boxes: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with shape Bx4. Requires the original image
        size in (H, W) format.
        """
        boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    @staticmethod
    def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
        """
        Compute the output size given input size and target long side length.
        """
        scale = long_side_length * 1.0 / max(oldh, oldw)
        newh, neww = oldh * scale, oldw * scale
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return (newh, neww)
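A quick worked example of the resize math in get_preprocess_shape and apply_coords, using the ResizeLongestSide class above with made-up sizes (illustrative only, not part of the upstream file):

import numpy as np

new_hw = ResizeLongestSide.get_preprocess_shape(600, 800, 1024)
print(new_hw)  # (768, 1024): scale = 1024 / 800 = 1.28, applied to both sides

transform = ResizeLongestSide(1024)
coords = np.array([[400.0, 300.0]])                 # one (x, y) point in the 600x800 image
print(transform.apply_coords(coords, (600, 800)))   # [[512. 384.]]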
11
custom_nodes/was-node-suite-comfyui/repos/SAM/setup.cfg
Normal file
@@ -0,0 +1,11 @@
[isort]
line_length=100
multi_line_output=3
include_trailing_comma=True
known_standard_library=numpy,setuptools
skip_glob=*/__init__.py
known_myself=segment_anything
known_third_party=matplotlib,cv2,torch,torchvision,pycocotools,onnx,black,isort
no_lines_before=STDLIB,THIRDPARTY
sections=FUTURE,STDLIB,THIRDPARTY,MYSELF,FIRSTPARTY,LOCALFOLDER
default_section=FIRSTPARTY
18
custom_nodes/was-node-suite-comfyui/repos/SAM/setup.py
Normal file
@@ -0,0 +1,18 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import find_packages, setup

setup(
    name="segment_anything",
    version="1.0",
    install_requires=[],
    packages=find_packages(exclude="notebooks"),
    extras_require={
        "all": ["matplotlib", "pycocotools", "opencv-python", "onnx", "onnxruntime"],
        "dev": ["flake8", "isort", "black", "mypy"],
    },
)
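Assuming a standard setuptools workflow, this vendored SAM package could be installed in editable mode together with its optional extras, e.g. pip install -e ".[all]" for the plotting/ONNX tooling or pip install -e ".[dev]" for the lint and type-check tools. Note that install_requires is empty, so torch itself is presumably expected to come from the surrounding ComfyUI environment rather than from this setup file.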
20
custom_nodes/was-node-suite-comfyui/requirements.txt
Normal file
@@ -0,0 +1,20 @@
cmake
fairscale>=0.4.4
git+https://github.com/WASasquatch/img2texture.git
git+https://github.com/WASasquatch/cstr
gitpython
imageio
joblib
matplotlib
numba
numpy
opencv-python-headless[ffmpeg]
pilgram
git+https://github.com/WASasquatch/ffmpy.git
rembg
scikit-image>=0.20.0
scikit-learn
scipy
timm>=0.4.12
tqdm
transformers
BIN
custom_nodes/was-node-suite-comfyui/res/font.ttf
Normal file
12213
custom_nodes/was-node-suite-comfyui/res/haarcascade_eye.xml
Normal file
24350
custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt.xml
Normal file
20719
custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt2.xml
Normal file
103493
custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt_tree.xml
Normal file
29690
custom_nodes/was-node-suite-comfyui/res/haarcascade_profileface.xml
Normal file
28134
custom_nodes/was-node-suite-comfyui/res/haarcascade_upperbody.xml
Normal file
6693
custom_nodes/was-node-suite-comfyui/res/lbpcascade_animeface.xml
Normal file
1
custom_nodes/was-node-suite-comfyui/tests/pytest.ini
Normal file
@@ -0,0 +1 @@
[pytest]
@@ -0,0 +1,69 @@
from was_mock import was_text_sort


def test_empty_text():
    assert was_text_sort() == ""


def test_empty_text_with_separator_override():
    assert was_text_sort(separator="|") == ""


def test_already_sorted_text():
    assert was_text_sort("already, sorted, text") == "already, sorted, text"


def test_already_sorted_text_with_separator_override():
    assert was_text_sort("already, sorted, text", separator="|") == "already, sorted, text"


def test_with_alternative_separator():
    assert was_text_sort("test | with | alternative | separator", separator=" | ") == "alternative | separator | test | with"


def test_with_trailing_separators():
    assert was_text_sort("test, with, trailing, separator,") == "separator, test, trailing, with"


def test_with_tabs():
    assert was_text_sort("test,\t without, \tweights") == "test, weights, without"


def test_with_linefeed_newlines():
    assert was_text_sort("test,\n without, \nweights") == "test, weights, without"


def test_with_macos_pre_cheetah_newlines():
    assert was_text_sort("test,\r without, \rweights") == "test, weights, without"


def test_with_windows_newlines():
    assert was_text_sort("test,\r\n without, \r\nweights") == "test, weights, without"


def test_without_weights():
    assert was_text_sort("test, without, weights") == "test, weights, without"


def test_with_weights():
    assert was_text_sort("(test:1), (with:2.0), (weights:3.1)") == "(test:1), (weights:3.1), (with:2.0)"


def test_with_some_weights():
    assert was_text_sort("(test:1), with, some, (weights:3.1)") == "some, (test:1), (weights:3.1), with"


def test_with_half_weights():
    assert was_text_sort("(test:1), with, half (weights:3.1)") == "half (weights:3.1), (test:1), with"


# ASCII "_" is after uppercase and before lowercase letters
def test_with_wildcards():
    assert was_text_sort("test, with, __wildcards__") == "__wildcards__, test, with"


def test_with_weighted_wildcards():
    assert was_text_sort("test, (with:2), (__wildcards__:3)") == "(__wildcards__:3), test, (with:2)"


# ASCII "{" is after all letters
def test_with_dynamic_prompts():
    assert was_text_sort("test, {with|dynamic|prompts}") == "test, {with|dynamic|prompts}"


def test_with_weighted_dynamic_prompts():
    assert was_text_sort("(test:1.1), with, ({weighted|dynamic|prompts}:0.9)") == "(test:1.1), with, ({weighted|dynamic|prompts}:0.9)"


def test_with_embeddings():
    assert was_text_sort("test, with, embedding:my_embed.pt") == "embedding:my_embed.pt, test, with"


def test_with_lora():
    assert was_text_sort("test, with, lora:my_lora.safetensors") == "lora:my_lora.safetensors, test, with"


def test_with_grouped_weights():
    assert was_text_sort("(test, with:1), (grouped, weights:2.1)") == "(grouped, weights:2.1), (test, with:1)"


def test_with_nested_weights():
    assert was_text_sort("(test, (with:1.2):1.1), ((nested:1), weights:2)") == "((nested:1), weights:2), (test, (with:1.2):1.1)"
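The weighted-token tests above imply a sort key that compares the text inside a "(token:weight)" wrapper rather than the literal string, so "(test:1)" sorts next to a bare "test". A minimal sketch of that idea with a hypothetical helper name; the real WAS_Text_Sort.sort in WAS_Node_Suite.py may implement it differently:

import re

def sort_key_sketch(token: str) -> str:
    # Strip a single "(inner:weight)" wrapper so comparison uses the inner text.
    return re.sub(r"^\((.*):[\d.]+\)$", r"\1", token.strip())

tokens = ["(test:1)", "with", "some", "(weights:3.1)"]
print(", ".join(sorted(tokens, key=sort_key_sketch)))
# -> some, (test:1), (weights:3.1), with   (matches test_with_some_weights above)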
11
custom_nodes/was-node-suite-comfyui/tests/was_mock.py
Normal file
@@ -0,0 +1,11 @@
# TODO: In case anyone who knows how to set up PyTest correctly comes around, this file can be scrapped.
from pathlib import Path

# Stand-in for WAS_Node_Suite's TEXT_TYPE constant, which the extracted class source expects to exist.
TEXT_TYPE = "STRING"

CLASS_NAME = "WAS_Text_Sort"
class_string = f"class {CLASS_NAME}:"
# Cut just the WAS_Text_Sort class out of WAS_Node_Suite.py and exec it, so the tests
# avoid importing the whole node suite and its heavy dependencies.
exec(class_string + Path("../WAS_Node_Suite.py").read_text().split(class_string)[1].split("class ")[0])


def was_text_sort(text="", separator=WAS_Text_Sort.INPUT_TYPES()["required"]["separator"][1]["default"]):
    # Call the node's sort() with its declared default separator; the result is a tuple, take the text output.
    return WAS_Text_Sort().sort(text, separator)[0]
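Note that was_mock.py reads ../WAS_Node_Suite.py with a path relative to the current working directory, so these tests presumably need to be invoked from inside custom_nodes/was-node-suite-comfyui/tests (cd into that folder and run pytest) rather than from the repository root.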