
Commit: rename to iopaint
Sanster committed Jan 5, 2024
1 parent f1f18aa commit a73e2a5
Showing 101 changed files with 180 additions and 253 deletions.
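The bulk of the diff below is a mechanical package rename: the `lama_cleaner` directory becomes `iopaint`, and every `lama_cleaner` import is rewritten to `iopaint`. As a rough, hypothetical sketch (not necessarily how this commit was actually produced), a rename of this kind is typically scripted along these lines, assuming a POSIX shell with git and GNU sed available:

```bash
# Move the package directory while preserving git history
git mv lama_cleaner iopaint

# Rewrite imports and other references in tracked Python files
git grep -lz 'lama_cleaner' -- '*.py' | xargs -0 sed -i 's/lama_cleaner/iopaint/g'

# Inspect the result before committing
git diff --stat
```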
2 changes: 1 addition & 1 deletion .gitignore
@@ -4,7 +4,7 @@ examples/
.idea/
.vscode/
build
!lama_cleaner/app/build
!iopaint/app/build
dist/
lama_cleaner.egg-info/
venv/
76 changes: 1 addition & 75 deletions README.md
@@ -1,75 +1 @@
<p align="center">
<img alt="logo" height=256 src="./assets/logo.png" />
</p>
<h1 align="center">Lama Cleaner</h1>
<p align="center">A free and open-source inpainting tool powered by SOTA AI model.</p>

<p align="center">
<a href="https://github.com/Sanster/lama-cleaner">
<img alt="total download" src="https://pepy.tech/badge/lama-cleaner" />
</a>
<a href="https://pypi.org/project/lama-cleaner/">
<img alt="version" src="https://img.shields.io/pypi/v/lama-cleaner" />
</a>
<a href="https://colab.research.google.com/drive/1e3ZkAJxvkK3uzaTGu91N9TvI_Mahs0Wb?usp=sharing">
<img alt="Open in Colab" src="https://colab.research.google.com/assets/colab-badge.svg" />
</a>

<a href="https://huggingface.co/spaces/Sanster/Lama-Cleaner-lama">
<img alt="Hugging Face Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue" />
</a>

<a href="">
<img alt="python version" src="https://img.shields.io/pypi/pyversions/lama-cleaner" />
</a>
<a href="https://hub.docker.com/r/cwq1913/lama-cleaner">
<img alt="version" src="https://img.shields.io/docker/pulls/cwq1913/lama-cleaner" />
</a>
</p>

https://user-images.githubusercontent.com/3998421/196976498-ba1ad3ab-fa18-4c55-965f-5c6683141375.mp4

## Features

- Completely free and open-source, fully self-hosted; supports CPU, GPU, and Apple M1/M2
- [Windows 1-Click Installer](https://lama-cleaner-docs.vercel.app/install/windows_1click_installer)
- [Native macOS app](https://opticlean.io/)
- Multiple SOTA AI [models](https://lama-cleaner-docs.vercel.app/models)
- Erase model: LaMa/LDM/ZITS/MAT/FcF/Manga
- Erase and Replace model: Stable Diffusion/Paint by Example
- [Plugins](https://lama-cleaner-docs.vercel.app/plugins) for post-processing:
- [RemoveBG](https://github.com/danielgatis/rembg): Remove image backgrounds
- [RealESRGAN](https://github.com/xinntao/Real-ESRGAN): Super Resolution
- [GFPGAN](https://github.com/TencentARC/GFPGAN): Face Restoration
- [RestoreFormer](https://github.com/wzhouxiff/RestoreFormer): Face Restoration
- [Segment Anything](https://lama-cleaner-docs.vercel.app/plugins#interactive-segmentation): Accurate and fast interactive object segmentation
- [FileManager](https://lama-cleaner-docs.vercel.app/features/file_manager): Browse your pictures conveniently and save them directly to the output directory.
- More features at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/)

## Quick Start

Lama Cleaner makes it easy to use SOTA AI models in just two commands:

```bash
# In order to use the GPU, install the CUDA version of PyTorch first.
# pip install torch==1.13.1+cu117 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu117
pip install lama-cleaner
lama-cleaner --model=lama --device=cpu --port=8080
```

That's it! Lama Cleaner is now running at http://localhost:8080

See all command line arguments at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/install/pip)

## Development

Only needed if you plan to modify the frontend and recompile yourself.

### Frontend

The frontend code is modified from [cleanup.pictures](https://github.com/initml/cleanup.pictures). You can try their great online service [here](https://cleanup.pictures/).

- Install dependencies: `cd lama_cleaner/app/ && pnpm install`
- Start development server: `pnpm start`
- Build: `pnpm build`
# IOPaint
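With the README reduced here to a single `# IOPaint` heading, the quick-start commands quoted above no longer match the new name. Presumably (the exact package and console-script names are not shown in this diff) the post-rename equivalent would look something like:

```bash
pip install iopaint
iopaint start --model=lama --device=cpu --port=8080
```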
2 changes: 1 addition & 1 deletion lama_cleaner/__init__.py → iopaint/__init__.py
@@ -10,6 +10,6 @@
def entry_point():
# To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers
# https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18
from lama_cleaner.cli import typer_app
from iopaint.cli import typer_app

typer_app()
18 changes: 9 additions & 9 deletions lama_cleaner/api.py → iopaint/api.py
@@ -21,8 +21,8 @@
from loguru import logger
from socketio import AsyncServer

from lama_cleaner.file_manager import FileManager
from lama_cleaner.helper import (
from iopaint.file_manager import FileManager
from iopaint.helper import (
load_img,
decode_base64_to_image,
pil_to_bytes,
@@ -31,12 +31,12 @@
gen_frontend_mask,
adjust_mask,
)
from lama_cleaner.model.utils import torch_gc
from lama_cleaner.model_info import ModelInfo
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.plugins import build_plugins
from lama_cleaner.plugins.base_plugin import BasePlugin
from lama_cleaner.schema import (
from iopaint.model.utils import torch_gc
from iopaint.model_info import ModelInfo
from iopaint.model_manager import ModelManager
from iopaint.plugins import build_plugins
from iopaint.plugins.base_plugin import BasePlugin
from iopaint.schema import (
GenInfoResponse,
ApiConfig,
ServerConfigResponse,
@@ -356,7 +356,7 @@ def _build_model_manager(self):


if __name__ == "__main__":
from lama_cleaner.schema import InteractiveSegModel, RealESRGANModel
from iopaint.schema import InteractiveSegModel, RealESRGANModel

app = FastAPI()
api = Api(
@@ -17,9 +17,9 @@
TimeRemainingColumn,
)

from lama_cleaner.helper import pil_to_bytes
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import InpaintRequest
from iopaint.helper import pil_to_bytes
from iopaint.model_manager import ModelManager
from iopaint.schema import InpaintRequest


def glob_images(path: Path) -> Dict[str, Path]:
4 changes: 2 additions & 2 deletions lama_cleaner/benchmark.py → iopaint/benchmark.py
@@ -9,8 +9,8 @@
import psutil
import torch

from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
from iopaint.model_manager import ModelManager
from iopaint.schema import InpaintRequest, HDStrategy, SDSampler

try:
torch._C._jit_override_can_fuse_on_cpu(False)
16 changes: 8 additions & 8 deletions lama_cleaner/cli.py → iopaint/cli.py
@@ -6,16 +6,16 @@
from loguru import logger
from typer import Option

from lama_cleaner.const import *
from lama_cleaner.download import cli_download_model, scan_models
from lama_cleaner.runtime import setup_model_dir, dump_environment_info, check_device
from iopaint.const import *
from iopaint.download import cli_download_model, scan_models
from iopaint.runtime import setup_model_dir, dump_environment_info, check_device

typer_app = typer.Typer(pretty_exceptions_show_locals=False, add_completion=False)


@typer_app.command(help="Install all plugins dependencies")
def install_plugins_packages():
from lama_cleaner.installer import install_plugins_package
from iopaint.installer import install_plugins_package

install_plugins_package()

@@ -67,12 +67,12 @@ def run(
logger.info(f"{model} not found in {model_dir}, try to downloading")
cli_download_model(model, model_dir)

from lama_cleaner.batch_processing import batch_inpaint
from iopaint.batch_processing import batch_inpaint

batch_inpaint(model, device, image, mask, output, config, concat)


@typer_app.command(help="Start lama cleaner server")
@typer_app.command(help="Start IOPaint server")
def start(
host: str = Option("127.0.0.1"),
port: int = Option(8080),
@@ -136,8 +136,8 @@ def start(
logger.info(f"{model} not found in {model_dir}, try to downloading")
cli_download_model(model, model_dir)

from lama_cleaner.api import Api
from lama_cleaner.schema import ApiConfig
from iopaint.api import Api
from iopaint.schema import ApiConfig

app = FastAPI()
api = Api(
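Besides `start`, the `run` command above hands its arguments to `batch_inpaint(model, device, image, mask, output, config, concat)`. Assuming the console script is exposed as `iopaint` and Typer derives flag names from those parameters (only `--host` and `--port` are visible in this hunk; the rest are inferred), a batch invocation might look roughly like:

```bash
# Hypothetical batch inpainting over a folder of images and matching masks
iopaint run --model lama --device cpu \
  --image ./photos --mask ./masks --output ./results
```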
File renamed without changes.
12 changes: 6 additions & 6 deletions lama_cleaner/download.py → iopaint/download.py
@@ -6,21 +6,21 @@
from loguru import logger
from pathlib import Path

from lama_cleaner.const import (
from iopaint.const import (
DEFAULT_MODEL_DIR,
DIFFUSERS_SD_CLASS_NAME,
DIFFUSERS_SD_INPAINT_CLASS_NAME,
DIFFUSERS_SDXL_CLASS_NAME,
DIFFUSERS_SDXL_INPAINT_CLASS_NAME,
)
from lama_cleaner.model.utils import handle_from_pretrained_exceptions
from lama_cleaner.model_info import ModelInfo, ModelType
from lama_cleaner.runtime import setup_model_dir
from iopaint.model.utils import handle_from_pretrained_exceptions
from iopaint.model_info import ModelInfo, ModelType
from iopaint.runtime import setup_model_dir


def cli_download_model(model: str, model_dir: Path):
setup_model_dir(model_dir)
from lama_cleaner.model import models
from iopaint.model import models

if model in models and models[model].is_erase_model:
logger.info(f"Downloading {model}...")
@@ -85,7 +85,7 @@ def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:

def scan_inpaint_models(model_dir: Path) -> List[ModelInfo]:
res = []
from lama_cleaner.model import models
from iopaint.model import models

# logger.info(f"Scanning inpaint models in {model_dir}")

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
10 changes: 5 additions & 5 deletions lama_cleaner/helper.py → iopaint/helper.py
@@ -10,7 +10,7 @@
from PIL import Image, ImageOps, PngImagePlugin
import numpy as np
import torch
from lama_cleaner.const import MPS_UNSUPPORT_MODELS
from iopaint.const import MPS_UNSUPPORT_MODELS
from loguru import logger
from torch.hub import download_url_to_file, get_dir
import hashlib
@@ -56,12 +56,12 @@ def download_model(url, model_md5: str = None):
try:
os.remove(cached_file)
logger.error(
f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint."
)
exit(-1)

@@ -80,12 +80,12 @@ def handle_error(model_path, model_md5, e):
try:
os.remove(model_path)
logger.error(
f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint."
)
else:
logger.error(
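When the MD5 check in `download_model` / `handle_error` fails, the updated log messages ask the user to delete the cached file and restart iopaint. A manual check along those lines might look like the following; the cache path and file name are hypothetical, since the real location comes from `torch.hub.get_dir()` via `get_cache_path_by_url`:

```bash
# Compare the cached model's md5 with the expected value from the error log
md5sum ~/.cache/torch/hub/checkpoints/big-lama.pt

# If it differs, remove the file so it is re-downloaded on the next start
rm ~/.cache/torch/hub/checkpoints/big-lama.pt
```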
File renamed without changes.
File renamed without changes.
8 changes: 4 additions & 4 deletions lama_cleaner/model/base.py → iopaint/model/base.py
@@ -6,15 +6,15 @@
import numpy as np
from loguru import logger

from lama_cleaner.helper import (
from iopaint.helper import (
boxes_from_mask,
resize_max_size,
pad_img_to_modulo,
switch_mps_device,
)
from lama_cleaner.model.helper.g_diffuser_bot import expand_image
from lama_cleaner.model.utils import get_scheduler
from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
from iopaint.model.helper.g_diffuser_bot import expand_image
from iopaint.model.utils import get_scheduler
from iopaint.schema import InpaintRequest, HDStrategy, SDSampler


class InpaintModel:
10 changes: 5 additions & 5 deletions lama_cleaner/model/controlnet.py → iopaint/model/controlnet.py
@@ -5,16 +5,16 @@
from diffusers import ControlNetModel, DiffusionPipeline
from loguru import logger

from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.helper.controlnet_preprocess import (
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.helper.controlnet_preprocess import (
make_canny_control_image,
make_openpose_control_image,
make_depth_control_image,
make_inpaint_control_image,
)
from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
from lama_cleaner.model.utils import get_scheduler, handle_from_pretrained_exceptions
from lama_cleaner.schema import InpaintRequest, ModelType
from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
from iopaint.model.utils import get_scheduler, handle_from_pretrained_exceptions
from iopaint.schema import InpaintRequest, ModelType


class ControlNet(DiffusionInpaintModel):
@@ -2,7 +2,7 @@
import numpy as np
from tqdm import tqdm

from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like

from loguru import logger

8 changes: 4 additions & 4 deletions lama_cleaner/model/fcf.py → iopaint/model/fcf.py
@@ -6,21 +6,21 @@
import numpy as np
import torch.fft as fft

from lama_cleaner.schema import InpaintRequest
from iopaint.schema import InpaintRequest

from lama_cleaner.helper import (
from iopaint.helper import (
load_model,
get_cache_path_by_url,
norm_img,
boxes_from_mask,
resize_max_size,
download_model,
)
from lama_cleaner.model.base import InpaintModel
from iopaint.model.base import InpaintModel
from torch import conv2d, nn
import torch.nn.functional as F

from lama_cleaner.model.utils import (
from iopaint.model.utils import (
setup_filter,
_parse_scaling,
_parse_padding,
@@ -1,5 +1,5 @@
import torch
from lama_cleaner.model.utils import torch_gc
from iopaint.model.utils import torch_gc


class CPUTextEncoderWrapper(torch.nn.Module):
File renamed without changes.
@@ -3,8 +3,8 @@
import torch
from loguru import logger

from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.schema import InpaintRequest
from iopaint.model.base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest


class InstructPix2Pix(DiffusionInpaintModel):
@@ -3,9 +3,9 @@
import numpy as np
import torch

from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import get_scheduler
from lama_cleaner.schema import InpaintRequest
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.utils import get_scheduler
from iopaint.schema import InpaintRequest


class Kandinsky(DiffusionInpaintModel):