|  | 
| 100 | 100 |     "\n", | 
| 101 | 101 |     "from pip_helper import pip_install\n", | 
| 102 | 102 |     "\n", | 
| 103 |  | -    "# pip_install(\n", | 
| 104 |  | -    "#     \"-U\",\n", | 
| 105 |  | -    "#     \"torch>=2.1\",\n", | 
| 106 |  | -    "#     \"torchvision\",\n", | 
| 107 |  | -    "#     \"torchaudio\",\n", | 
| 108 |  | -    "#     \"--extra-index-url\",\n", | 
| 109 |  | -    "#     \"https://download.pytorch.org/whl/cpu\",\n", | 
| 110 |  | -    "# )\n", | 
| 111 |  | -    "pip_install(\n", | 
| 112 |  | -    "    \"gradio>=4.19\"\n", | 
| 113 |  | -    ")\n", | 
|  | 103 | +    "pip_install(\"gradio>=4.19\")\n", | 
| 114 | 104 |     "if platform.system() == \"Darwin\":\n", | 
| 115 | 105 |     "    pip_install(\"numpy<2.0\")\n", | 
| 116 | 106 |     "\n", | 
| 117 | 107 |     "pip_install(\n", | 
| 118 | 108 |     "    \"git+https://github.com/ace-step/ACE-Step.git@6ae0852b1388de6dc0cca26b31a86d711f723cb3\", \"--extra-index-url\", \"https://download.pytorch.org/whl/cpu\"\n", | 
| 119 | 109 |     ")\n", | 
| 120 |  | -    "# if platform.system() == \"Darwin\":\n", | 
| 121 |  | -    "#     pip_install(\"-U\", \"transformers>=4.51\")\n", | 
| 122 | 110 |     "\n", | 
| 123 | 111 |     "pip_install(\"openvino>=2025.1.0\", \"openvino-tokenizers>=2025.1.0\", \"nncf>=2.16.0\")\n", | 
| 124 | 112 |     "\n", | 
|  | 
| 390 | 378 |   }, | 
| 391 | 379 |   { | 
| 392 | 380 |    "cell_type": "code", | 
| 393 |  | -   "execution_count": 9, | 
|  | 381 | +   "execution_count": null, | 
| 394 | 382 |    "id": "f9db0d8a", | 
| 395 | 383 |    "metadata": {}, | 
| 396 | 384 |    "outputs": [ | 
|  | 
| 422 | 410 |     "import nncf\n", | 
| 423 | 411 |     "from ov_ace_helper import convert_models\n", | 
| 424 | 412 |     "\n", | 
|  | 413 | +    "ov_converted_model_dir = \"ov_models\"\n", | 
| 425 | 414 |     "if model_format.value == \"INT4\":\n", | 
| 426 | 415 |     "    weights_compression_config = {\"mode\": nncf.CompressWeightsMode.INT4_ASYM, \"group_size\": 128, \"ratio\": 0.8}\n", | 
|  | 416 | +    "    ov_converted_model_dir += \"_int4\"\n", | 
| 427 | 417 |     "elif model_format.value == \"INT8\":\n", | 
| 428 | 418 |     "    weights_compression_config = {\"mode\": nncf.CompressWeightsMode.INT8_ASYM}\n", | 
|  | 419 | +    "    ov_converted_model_dir += \"_int8\"\n", | 
| 429 | 420 |     "else:\n", | 
| 430 | 421 |     "    weights_compression_config = None\n", | 
| 431 | 422 |     "\n", | 
| 432 |  | -    "ov_converted_model_dir = \"ov_models\"\n", | 
| 433 | 423 |     "convert_models(pipeline, model_dir=ov_converted_model_dir, orig_checkpoint_path=checkpoint_dir, quantization_config=weights_compression_config)" | 
| 434 | 424 |    ] | 
| 435 | 425 |   }, | 
|  | 
| 648 | 638 |     "[back to top ⬆️](#Table-of-contents:)\n", | 
| 649 | 639 |     "\n", | 
| 650 | 640 |     "\n", | 
| 651 |  | -    "LoRA is a technique that allows to fine-tune large models with a small number of parameters. ACE Step support LoRA, more information about it can be find (here)[https://github.com/ace-step/ACE-Step?tab=readme-ov-file#-applications].\n", | 
|  | 641 | +    "LoRA is a technique that allows fine-tuning large models with a small number of parameters. ACE Step supports LoRA; more information about it can be found [here](https://github.com/ace-step/ACE-Step?tab=readme-ov-file#-applications).\n", | 
| 652 | 642 |     "\n", | 
| 653 | 643 |     "Let's try LoRA. To use LoRA for ACE Step and OpenVINO, LoRA should be applied for the model and model should be converted to IR format." | 
| 654 | 644 |    ] | 
|  | 