473 | 473 | " )\n",
474 | 474 | "\n",
475 | 475 | " if VAL_AMP:\n",
476 |     | - " with torch.cuda.amp.autocast():\n",
    | 476 | + " with torch.autocast(\"cuda\"):\n",
477 | 477 | " return _compute(input)\n",
478 | 478 | " else:\n",
479 | 479 | " return _compute(input)\n",
480 | 480 | "\n",
481 | 481 | "\n",
482 | 482 | "# use amp to accelerate training\n",
483 |     | - "scaler = torch.cuda.amp.GradScaler()\n",
    | 483 | + "scaler = torch.GradScaler(\"cuda\")\n",
484 | 484 | "# enable cuDNN benchmark\n",
485 | 485 | "torch.backends.cudnn.benchmark = True"
486 | 486 | ]
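The hunk above replaces the deprecated torch.cuda.amp.autocast() and torch.cuda.amp.GradScaler() with the device-agnostic spellings that take the device type as an argument (available in recent PyTorch releases). A minimal sketch of what the autocast context does, using a throwaway linear layer rather than the notebook's network, and assuming a CUDA device is present:

    import torch

    net = torch.nn.Linear(16, 4).cuda()     # placeholder model, not the notebook's network
    x = torch.randn(8, 16, device="cuda")

    with torch.autocast("cuda"):             # eligible ops inside the block run in half precision
        y_amp = net(x)
    y_fp32 = net(x)                          # outside the block, ops stay in float32

    print(y_amp.dtype, y_fp32.dtype)         # typically torch.float16 vs torch.float32

    scaler = torch.GradScaler("cuda")        # new spelling of the loss scaler used for training

The unchanged context line torch.backends.cudnn.benchmark = True lets cuDNN auto-tune convolution algorithms, which pays off when input shapes stay constant across iterations.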
⋮
526 | 526 | " batch_data[\"label\"].to(device),\n",
527 | 527 | " )\n",
528 | 528 | " optimizer.zero_grad()\n",
529 |     | - " with torch.cuda.amp.autocast():\n",
    | 529 | + " with torch.autocast(\"cuda\"):\n",
530 | 530 | " outputs = model(inputs)\n",
531 | 531 | " loss = loss_function(outputs, labels)\n",
532 | 532 | " scaler.scale(loss).backward()\n",
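For context, scaler.scale(loss).backward() is only the first half of the mixed-precision update; the scaler is also expected to step the optimizer and refresh its scale factor. A self-contained sketch of that standard pattern, under the same assumptions as above (CUDA available, placeholder model/optimizer/loss rather than the notebook's objects):

    import torch

    # hypothetical stand-ins for the notebook's model, optimizer, and loss
    model = torch.nn.Linear(16, 4).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    loss_function = torch.nn.CrossEntropyLoss()
    scaler = torch.GradScaler("cuda")

    inputs = torch.randn(8, 16, device="cuda")
    labels = torch.randint(0, 4, (8,), device="cuda")

    optimizer.zero_grad()
    with torch.autocast("cuda"):
        outputs = model(inputs)
        loss = loss_function(outputs, labels)

    scaler.scale(loss).backward()  # backward on the scaled loss to limit fp16 underflow
    scaler.step(optimizer)         # unscales gradients; skips the step if any are inf/nan
    scaler.update()                # adjusts the scale factor for the next iteration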
⋮
733 | 733 | }
734 | 734 | ],
735 | 735 | "source": [
736 |     | - "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\")))\n",
    | 736 | + "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\"), weights_only=True))\n",
737 | 737 | "model.eval()\n",
738 | 738 | "with torch.no_grad():\n",
739 | 739 | " # select one image to evaluate and visualize the model output\n",
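weights_only=True tells torch.load to use a restricted unpickler that only rebuilds tensors and plain Python containers instead of executing arbitrary pickled objects, which is why it is the recommended way to read checkpoints (and the default from PyTorch 2.6). A small round-trip sketch with throwaway file and module names, not the notebook's:

    import os
    import tempfile
    import torch

    net = torch.nn.Linear(16, 4)                    # placeholder for the notebook's network
    ckpt = os.path.join(tempfile.gettempdir(), "demo_weights.pth")

    torch.save(net.state_dict(), ckpt)              # persist only the parameter tensors

    restored = torch.nn.Linear(16, 4)
    state = torch.load(ckpt, weights_only=True)     # safe unpickler: tensors/containers only
    restored.load_state_dict(state)
    restored.eval()                                 # disable dropout / batch-norm updates

    with torch.no_grad():                           # no autograd bookkeeping during inference
        out = restored(torch.randn(2, 16))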
⋮
835 | 835 | }
836 | 836 | ],
837 | 837 | "source": [
838 |     | - "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\")))\n",
    | 838 | + "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\"), weights_only=True))\n",
839 | 839 | "model.eval()\n",
840 | 840 | "\n",
841 | 841 | "with torch.no_grad():\n",
⋮
924 | 924 | " )\n",
925 | 925 | "\n",
926 | 926 | " if VAL_AMP:\n",
927 |     | - " with torch.cuda.amp.autocast():\n",
    | 927 | + " with torch.autocast(\"cuda\"):\n",
928 | 928 | " return _compute(input)\n",
929 | 929 | " else:\n",
930 | 930 | " return _compute(input)"
⋮
977 | 977 | "source": [
978 | 978 | "onnx_model_path = os.path.join(root_dir, \"best_metric_model.onnx\")\n",
979 | 979 | "ort_session = onnxruntime.InferenceSession(onnx_model_path)\n",
980 |     | - "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\")))\n",
    | 980 | + "model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\"), weights_only=True))\n",
981 | 981 | "model.eval()\n",
982 | 982 | "\n",
983 | 983 | "with torch.no_grad():\n",
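This cell builds an onnxruntime.InferenceSession from the exported model and reloads the PyTorch weights, presumably so the ONNX and PyTorch outputs can be compared. A rough sketch of how a session is typically queried; the model path and input shape below are assumptions for illustration, not values taken from the notebook:

    import numpy as np
    import onnxruntime

    # illustrative path; the real one is built from root_dir in the notebook
    session = onnxruntime.InferenceSession("best_metric_model.onnx")
    input_name = session.get_inputs()[0].name        # ONNX graph inputs are addressed by name

    dummy = np.random.randn(1, 4, 240, 240, 160).astype(np.float32)   # assumed input shape
    outputs = session.run(None, {input_name: dummy})  # list with one array per graph output
    print(outputs[0].shape)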