diff --git a/samples/gstreamer/gst_launch/action_recognition/action_recognition.sh b/samples/gstreamer/gst_launch/action_recognition/action_recognition.sh
index 6b9cdda0..239ab5be 100755
--- a/samples/gstreamer/gst_launch/action_recognition/action_recognition.sh
+++ b/samples/gstreamer/gst_launch/action_recognition/action_recognition.sh
@@ -13,6 +13,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo "  DEVICE - Device (default: CPU). Supported: CPU, GPU, NPU"
+    echo "  OUTPUT - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-https://videos.pexels.com/video-files/5144823/5144823-uhd_3840_2160_25fps.mp4}
 DEVICE=${2:-CPU}
 OUTPUT=${3:-file} # Supported values: display, fps, json, display-and-json, file
@@ -27,7 +39,7 @@ if [[ -z $INPUT ]]; then
 fi
 
 if [[ $OUTPUT == "display" ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT=" gvafpscounter ! fakesink async=false"
 elif [[ $OUTPUT == "json" ]]; then
@@ -35,7 +47,7 @@ elif [[ $OUTPUT == "json" ]]; then
     rm -f output.json
     SINK_ELEMENT=" gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "file" ]]; then
     FILE="$(basename ${INPUT%.*})"
     rm -f "action_recognition_${FILE}_${DEVICE}.mp4"
@@ -47,7 +59,7 @@ elif [[ $OUTPUT == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=action_recognition_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=action_recognition_${FILE}_${DEVICE}.mp4"
 else
     echo Error wrong value for OUTPUT parameter
     echo Valid values: "display" - render to screen, "file" - render to file, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/custom_postproc/classify/build_and_run.sh b/samples/gstreamer/gst_launch/custom_postproc/classify/build_and_run.sh
index 3b2e9c41..795da188 100755
--- a/samples/gstreamer/gst_launch/custom_postproc/classify/build_and_run.sh
+++ b/samples/gstreamer/gst_launch/custom_postproc/classify/build_and_run.sh
@@ -18,6 +18,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [DEVICE] [OUTPUT] [INPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  DEVICE - Device (default: GPU). Supported: CPU, GPU, NPU"
+    echo "  OUTPUT - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo ""
+    exit 0
+fi
+
 DETECT_MODEL="centerface"
 DETECT_MODEL_PATH="$MODELS_PATH/public/$DETECT_MODEL/FP32/$DETECT_MODEL.xml"
 
@@ -89,9 +101,9 @@ if [[ "$OUTPUT" == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit 1
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=$OUTPUT_FILE"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=$OUTPUT_FILE"
 elif [[ "$OUTPUT" == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 elif [[ "$OUTPUT" == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false"
 elif [[ "$OUTPUT" == "json" ]]; then
@@ -105,7 +117,7 @@ elif [[ "$OUTPUT" == "display-and-json" ]]; then
     if [ -f "$OUTPUT_FILE" ]; then
         rm "$OUTPUT_FILE"
     fi
-    SINK_ELEMENT="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=$OUTPUT_FILE ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=$OUTPUT_FILE ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 else
     echo Error wrong value for SINK_ELEMENT parameter
     echo Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to json file, "display-and-json" - render to screen and write to json file
diff --git a/samples/gstreamer/gst_launch/custom_postproc/detect/build_and_run.sh b/samples/gstreamer/gst_launch/custom_postproc/detect/build_and_run.sh
index e4f7ad97..e5ffd435 100755
--- a/samples/gstreamer/gst_launch/custom_postproc/detect/build_and_run.sh
+++ b/samples/gstreamer/gst_launch/custom_postproc/detect/build_and_run.sh
@@ -18,6 +18,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [DEVICE] [OUTPUT] [INPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  DEVICE - Device (default: GPU). Supported: CPU, GPU, NPU"
+    echo "  OUTPUT - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo ""
+    exit 0
+fi
+
 MODEL="yolo11s"
 MODEL_PATH="$MODELS_PATH/public/$MODEL/FP32/$MODEL.xml"
 
@@ -81,9 +93,9 @@ if [[ "$OUTPUT" == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit 1
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=$OUTPUT_FILE"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=$OUTPUT_FILE"
 elif [[ "$OUTPUT" == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 elif [[ "$OUTPUT" == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false"
 elif [[ "$OUTPUT" == "json" ]]; then
@@ -97,7 +109,7 @@ elif [[ "$OUTPUT" == "display-and-json" ]]; then
     if [ -f "$OUTPUT_FILE" ]; then
         rm "$OUTPUT_FILE"
     fi
-    SINK_ELEMENT="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=$OUTPUT_FILE ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=$OUTPUT_FILE ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 else
     echo Error wrong value for SINK_ELEMENT parameter
     echo Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to json file, "display-and-json" - render to screen and write to json file
diff --git a/samples/gstreamer/gst_launch/detection_with_yolo/yolo_detect.sh b/samples/gstreamer/gst_launch/detection_with_yolo/yolo_detect.sh
index e295e96c..bb8883cb 100755
--- a/samples/gstreamer/gst_launch/detection_with_yolo/yolo_detect.sh
+++ b/samples/gstreamer/gst_launch/detection_with_yolo/yolo_detect.sh
@@ -17,6 +17,22 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [MODEL] [DEVICE] [INPUT] [OUTPUT] [PP_BACKEND] [PRECISION]"
+    echo ""
+    echo "Arguments:"
+    echo "  MODEL      - Model name (default: yolox_s)"
+    echo "               Supported: yolo_all, yolox-tiny, yolox_s, yolov7, yolov8s, yolov8n-obb, yolov8n-seg, yolov9c, yolov10s, yolo11s, yolo11s-obb, yolo11s-seg, yolo11s-pose"
+    echo "  DEVICE     - Device (default: GPU). Supported: CPU, GPU, NPU"
+    echo "  INPUT      - Input source (default: Pexels video URL)"
+    echo "  OUTPUT     - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo "  PP_BACKEND - Preprocessing backend (default: auto). Supported: ie, opencv, va, va-surface-sharing"
+    echo "  PRECISION  - Model precision (default: INT8). Supported: INT8, FP32, FP16"
+    echo ""
+    exit 0
+fi
+
 MODEL=${1:-"yolox_s"} # Supported values: yolo_all, yolox-tiny, yolox_s, yolov7, yolov8s, yolov8n-obb, yolov8n-seg, yolov9c, yolov10s, yolo11s, yolo11s-obb, yolo11s-seg, yolo11s-pose
 DEVICE=${2:-"GPU"} # Supported values: CPU, GPU, NPU
 INPUT=${3:-"https://videos.pexels.com/video-files/1192116/1192116-sd_640_360_30fps.mp4"}
@@ -121,9 +137,9 @@ if [[ "$OUTPUT" == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit 1
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=yolo_${FILE}_${MODEL}_${PRECISION}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=yolo_${FILE}_${MODEL}_${PRECISION}_${DEVICE}.mp4"
 elif [[ "$OUTPUT" == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 elif [[ "$OUTPUT" == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false"
 elif [[ "$OUTPUT" == "json" ]]; then
@@ -131,7 +147,7 @@ elif [[ "$OUTPUT" == "json" ]]; then
     SINK_ELEMENT="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ "$OUTPUT" == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 else
     echo Error wrong value for SINK_ELEMENT parameter
     echo Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/face_detection_and_classification/face_detection_and_classification.sh b/samples/gstreamer/gst_launch/face_detection_and_classification/face_detection_and_classification.sh
index d234dc5a..1a624308 100755
--- a/samples/gstreamer/gst_launch/face_detection_and_classification/face_detection_and_classification.sh
+++ b/samples/gstreamer/gst_launch/face_detection_and_classification/face_detection_and_classification.sh
@@ -13,6 +13,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo "  DEVICE - Device (default: CPU). Supported: CPU, GPU"
+    echo "  OUTPUT - Output type (default: display). Supported: file, display, fps, json, display-and-json"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-https://github.com/intel-iot-devkit/sample-videos/raw/master/head-pose-face-detection-female-and-male.mp4}
 DEVICE=${2:-CPU}
 OUTPUT=${3:-display} # Supported values: display, fps, json, display-and-json
@@ -30,7 +42,7 @@ else
 fi
 
 if [[ $OUTPUT == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false "
 elif [[ $OUTPUT == "json" ]]; then
@@ -38,7 +50,7 @@ elif [[ $OUTPUT == "json" ]]; then
     rm -f output.json
     SINK_ELEMENT="gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false "
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "file" ]]; then
     FILE="$(basename ${INPUT%.*})"
     rm -f "face_detection_and_classification_${FILE}_${DEVICE}.mp4"
@@ -50,7 +62,7 @@ elif [[ $OUTPUT == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=face_detection_and_classification_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=face_detection_and_classification_${FILE}_${DEVICE}.mp4"
 else
     echo Error wrong value for OUTPUT parameter
     echo Valid values: "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/geti_deployment/geti_sample.sh b/samples/gstreamer/gst_launch/geti_deployment/geti_sample.sh
index 0e1e7828..2c315ad3 100755
--- a/samples/gstreamer/gst_launch/geti_deployment/geti_sample.sh
+++ b/samples/gstreamer/gst_launch/geti_deployment/geti_sample.sh
@@ -16,6 +16,21 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [MODEL_TYPE] [MODEL_PATH] [DEVICE] [PREPROC_BACKEND] [INPUT] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  MODEL_TYPE      - Model type (default: detection). Supported: rotated-detection, instance-segmentation, detection, geti-detection, classification, geti-classification-single, geti-classification-multi, geti-obb, geti-segmentation, anomaly-detection"
+    echo "  MODEL_PATH      - Path to the model XML file relative to MODELS_PATH (default: /home/path/to/your/model.xml)"
+    echo "  DEVICE          - Device (default: CPU). Supported: CPU, GPU, NPU"
+    echo "  PREPROC_BACKEND - Preprocessing backend (default: ie for CPU, va-surface-sharing for GPU, va for NPU). Supported: ie, opencv, va, va-surface-sharing"
+    echo "  INPUT           - Input source (default: Pexels video URL)"
+    echo "  OUTPUT          - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo ""
+    exit 0
+fi
+
 # Default values for parameters
 # MODEL_TYPE can be rotated-detection, instance-segmentation, detection, geti-detection, classification, geti-obb, geti-segmentation, geti-classification-single, geti-classification-multi,anomaly-detection
 MODEL_TYPE=${1:-detection}
@@ -106,9 +121,9 @@ if [[ $OUTPUT == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT} ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=geti_${FILE}_${MODEL_TYPE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark${WT_OBB_ELEMENT} ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=geti_${FILE}_${MODEL_TYPE}_${DEVICE}.mp4"
 elif [[ $OUTPUT == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT} ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark${WT_OBB_ELEMENT} ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false"
 elif [[ $OUTPUT == "json" ]]; then
@@ -116,7 +131,7 @@ elif [[ $OUTPUT == "json" ]]; then
     SINK_ELEMENT="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT}! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark${WT_OBB_ELEMENT} ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 else
     echo Error wrong value for SINK_ELEMENT parameter
     echo Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/gvaattachroi/gvaattachroi_sample.sh b/samples/gstreamer/gst_launch/gvaattachroi/gvaattachroi_sample.sh
index a8cb760c..373a6c7c 100755
--- a/samples/gstreamer/gst_launch/gvaattachroi/gvaattachroi_sample.sh
+++ b/samples/gstreamer/gst_launch/gvaattachroi/gvaattachroi_sample.sh
@@ -14,6 +14,20 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE] [OUTPUT] [ROI_COORDS]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT      - Input source (default: Pexels video URL)"
+    echo "  DEVICE     - Device (default: CPU). Supported: CPU, GPU, NPU"
+    echo "  OUTPUT     - Output type (default: file). Supported: file, display, fps, json, display-and-json"
+    echo "  ROI_COORDS - Absolute pixel coordinates of the ROI in the form: x_top_left,y_top_left,x_bottom_right,y_bottom_right"
+    echo "               If not defined, the ROI list file ./roi_list.json will be used"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-"https://videos.pexels.com/video-files/1192116/1192116-sd_640_360_30fps.mp4"}
 DEVICE=${2:-"CPU"} # Supported values: CPU, GPU, NPU
 OUTPUT=${3:-"file"} # Supported values: file, display, fps, json, display-and-json
@@ -49,9 +63,9 @@ if [[ "$OUTPUT" == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=gvaattachroi_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=gvaattachroi_${FILE}_${DEVICE}.mp4"
 elif [[ "$OUTPUT" == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 elif [[ "$OUTPUT" == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false"
 elif [[ "$OUTPUT" == "json" ]]; then
@@ -59,7 +73,7 @@ elif [[ "$OUTPUT" == "json" ]]; then
     SINK_ELEMENT="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ "$OUTPUT" == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 else
     echo Error wrong value for SINK_ELEMENT parameter
     echo Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/human_pose_estimation/human_pose_estimation.sh b/samples/gstreamer/gst_launch/human_pose_estimation/human_pose_estimation.sh
index 145f6d54..43afb7a2 100755
--- a/samples/gstreamer/gst_launch/human_pose_estimation/human_pose_estimation.sh
+++ b/samples/gstreamer/gst_launch/human_pose_estimation/human_pose_estimation.sh
@@ -13,6 +13,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo "  DEVICE - Device (default: CPU). Supported: CPU, GPU"
+    echo "  OUTPUT - Output type (default: display). Supported: file, display, fps, json, display-and-json"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-https://github.com/intel-iot-devkit/sample-videos/raw/master/face-demographics-walking.mp4}
 DEVICE=${2:-CPU}
 OUTPUT=${3:-display} # Supported values: display, fps, json, display-and-json
@@ -28,7 +40,7 @@ else
 fi
 
 if [[ $OUTPUT == "display" ]] || [[ -z $OUTPUT ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false "
 elif [[ $OUTPUT == "json" ]]; then
@@ -36,7 +48,7 @@ elif [[ $OUTPUT == "json" ]]; then
     rm -f output.json
     SINK_ELEMENT="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false "
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "file" ]]; then
     FILE="$(basename ${INPUT%.*})"
     rm -f "human_pose_estimation_${FILE}_${DEVICE}.mp4"
@@ -48,7 +60,7 @@ elif [[ $OUTPUT == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=human_pose_estimation_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=human_pose_estimation_${FILE}_${DEVICE}.mp4"
 else
     echo Error wrong value for OUTPUT parameter
     echo Valid values: "display" - render to screen, "file" - render to file, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/instance_segmentation/instance_segmentation.sh b/samples/gstreamer/gst_launch/instance_segmentation/instance_segmentation.sh
index 8fafb46a..da4fe9a3 100755
--- a/samples/gstreamer/gst_launch/instance_segmentation/instance_segmentation.sh
+++ b/samples/gstreamer/gst_launch/instance_segmentation/instance_segmentation.sh
@@ -26,6 +26,20 @@ OUTPUT="file"
 BENCHMARK_SINK=""
 OUTPUT_DIRECTORY=""
 
+show_usage() {
+    echo "Usage: $0 [--model MODEL] [--device DEVICE] [--input INPUT] [--output OUTPUT] [--benchmark_sink BENCHMARK_SINK] [--output-directory OUTPUT_DIRECTORY]"
+    echo ""
+    echo "Arguments:"
+    echo "  --model MODEL                        - Model to use (default: mask_rcnn_inception_resnet_v2_atrous_coco). Allowed: ${ALLOWED_MODELS[*]}"
+    echo "  --device DEVICE                      - Device to use (default: CPU). Allowed: ${ALLOWED_DEVICES[*]}"
+    echo "  --input INPUT                        - Input source (default: Pexels video URL)"
+    echo "  --output OUTPUT                      - Output type (default: file). Allowed: ${ALLOWED_OUTPUTS[*]}"
+    echo "  --benchmark_sink BENCHMARK_SINK      - Benchmark sink element (default: empty)"
+    echo "  --output-directory OUTPUT_DIRECTORY  - Directory to save output files (default: current directory)"
+    echo "  --help                               - Show this help message"
+    echo ""
+}
+
 # Function to check if an item is in an array
 containsElement () {
     local element match="$1"
@@ -75,6 +89,10 @@ while [[ "$#" -gt 0 ]]; do
             OUTPUT_DIRECTORY="$2"
            shift
            ;;
+        --help)
+            show_usage
+            exit 0
+            ;;
        *)
            echo "Unknown parameter passed: $1"
            exit 1
@@ -150,12 +168,12 @@ if [[ `uname` != "MINGW64"* ]]; then
        exit
    fi
 fi
-sink_elements["file"]="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}.mp4"
-sink_elements['display']="gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
+sink_elements["file"]="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}.mp4"
+sink_elements['display']="vapostproc ! gvawatermark ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
 sink_elements['fps']="gvafpscounter ! fakesink sync=false"
 sink_elements['json']="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=${OUTPUT_DIRECTORY}output.json ! fakesink sync=false"
-sink_elements['display-and-json']="gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
-sink_elements["jpeg"]="gvawatermark ! jpegenc ! multifilesink location=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}_%05d.jpeg"
+sink_elements['display-and-json']="vapostproc ! gvawatermark ! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+sink_elements["jpeg"]="vapostproc ! gvawatermark ! jpegenc ! multifilesink location=${OUTPUT_DIRECTORY}instance_segmentation_${FILE}_${DEVICE}_%05d.jpeg"
 SINK_ELEMENT=${sink_elements[$OUTPUT]}
 
 # Construct the GStreamer pipeline
diff --git a/samples/gstreamer/gst_launch/license_plate_recognition/license_plate_recognition.sh b/samples/gstreamer/gst_launch/license_plate_recognition/license_plate_recognition.sh
index c8ad9230..29eb75d6 100755
--- a/samples/gstreamer/gst_launch/license_plate_recognition/license_plate_recognition.sh
+++ b/samples/gstreamer/gst_launch/license_plate_recognition/license_plate_recognition.sh
@@ -17,6 +17,18 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo "  DEVICE - Device (default: GPU). Supported: CPU, GPU"
+    echo "  OUTPUT - Output type (default: fps). Supported: display, display-async, fps, json, display-and-json, file"
+    echo ""
+    exit 0
+fi
+
 # Command-line parameters
 INPUT=${1:-https://github.com/open-edge-platform/edge-ai-resources/raw/main/videos/ParkingVideo.mp4}
 DEVICE=${2:-GPU} # Device for decode and inference in OpenVINO(TM) format, examples: AUTO, CPU, GPU, GPU.0
@@ -65,9 +77,9 @@ else
 fi
 
 if [[ $OUTPUT == "display" ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink"
 elif [[ $OUTPUT == "display-async" ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false "
 elif [[ $OUTPUT == "json" ]]; then
@@ -75,7 +87,7 @@ elif [[ $OUTPUT == "json" ]]; then
     rm -f output.json
     SINK_ELEMENT="gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "file" ]]; then
     FILE="$(basename ${INPUT%.*})"
     rm -f "lpr_${FILE}_${DEVICE}.mp4"
@@ -87,7 +99,7 @@ elif [[ $OUTPUT == "file" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=lpr_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=lpr_${FILE}_${DEVICE}.mp4"
 else
     echo Error wrong value for OUTPUT parameter
     echo Valid values: "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json
diff --git a/samples/gstreamer/gst_launch/lvm/generate_frame_embeddings.sh b/samples/gstreamer/gst_launch/lvm/generate_frame_embeddings.sh
index cca4b6ad..44ead52e 100755
--- a/samples/gstreamer/gst_launch/lvm/generate_frame_embeddings.sh
+++ b/samples/gstreamer/gst_launch/lvm/generate_frame_embeddings.sh
@@ -29,6 +29,20 @@ SUPPORTED_MODELS=(
     "clip-vit-base-patch32"
 )
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [SOURCE_FILE] [DEVICE] [OUTPUT] [MODEL] [PREPROCESSING_BACKEND]"
+    echo ""
+    echo "Arguments:"
+    echo "  SOURCE_FILE           - Input source (default: Pexels video URL)"
+    echo "  DEVICE                - Device (default: CPU). Supported: CPU, GPU"
+    echo "  OUTPUT                - Output type (default: json). Supported: json, fps"
+    echo "  MODEL                 - Model name (default: clip-vit-large-patch14). Supported: ${SUPPORTED_MODELS[*]}"
+    echo "  PREPROCESSING_BACKEND - Preprocessing backend (default: opencv). Supported: ie, opencv, va, va-surface-sharing"
+    echo ""
+    exit 0
+fi
+
 # Arguments
 SOURCE_FILE=${1:-$DEFAULT_SOURCE}
 DEVICE=${2:-$DEFAULT_DEVICE}
diff --git a/samples/gstreamer/gst_launch/metapublish/metapublish.sh b/samples/gstreamer/gst_launch/metapublish/metapublish.sh
index 6f008fef..4a33d684 100755
--- a/samples/gstreamer/gst_launch/metapublish/metapublish.sh
+++ b/samples/gstreamer/gst_launch/metapublish/metapublish.sh
@@ -14,6 +14,20 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [METHOD] [OUTPUT] [FORMAT] [TOPIC]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT  - Input source (default: Pexels video URL)"
+    echo "  METHOD - Metapublish method (default: file). Supported: file, kafka, mqtt"
+    echo "  OUTPUT - Output destination (default: stdout for file, localhost:9092 for kafka, localhost:1883 for mqtt)"
+    echo "  FORMAT - Output format (default: json for file, json-lines for kafka and mqtt). Supported: json, json-lines"
+    echo "  TOPIC  - Topic name (default: dlstreamer). Required for kafka and mqtt"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-https://github.com/intel-iot-devkit/sample-videos/raw/master/head-pose-face-detection-female-and-male.mp4}
 METHOD=${2:-file} # Accepts: file, kafka, mqtt
 OUTPUT=${3} # Path to file if method==file, host and port of message broker if method==kafka/mqtt. Default: "stdout" for file, "localhost:9092" for kafka, and "localhost:1883" for mqtt
diff --git a/samples/gstreamer/gst_launch/multi_stream/multi_stream_sample.sh b/samples/gstreamer/gst_launch/multi_stream/multi_stream_sample.sh
index 185f03d6..940e202d 100755
--- a/samples/gstreamer/gst_launch/multi_stream/multi_stream_sample.sh
+++ b/samples/gstreamer/gst_launch/multi_stream/multi_stream_sample.sh
@@ -17,13 +17,28 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DEVICE_STREAM_12] [DEVICE_STREAM_34] [MODEL_1] [MODEL_2] [OUTPUT]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT            - Input source (default: Pexels video URL)"
+    echo "  DEVICE_STREAM_12 - Device for stream 1 & 2 (default: NPU). Supported: CPU, GPU, NPU"
+    echo "  DEVICE_STREAM_34 - Device for stream 3 & 4 (default: GPU). Supported: CPU, GPU, NPU"
+    echo "  MODEL_1          - Model for stream 1 & 2 (default: yolov8s). Supported: yolox-tiny, yolox_s, yolov7, yolov8s, yolov9c"
+    echo "  MODEL_2          - Model for stream 3 & 4 (default: yolov8s). Supported: yolox-tiny, yolox_s, yolov7, yolov8s, yolov9c"
+    echo "  OUTPUT           - Output type (default: file). Supported: file, json"
+    echo ""
+    exit 0
+fi
+
 INPUT=${1:-"https://videos.pexels.com/video-files/1192116/1192116-sd_640_360_30fps.mp4"}
 DEVICE_STREAM_12=${2:-"NPU"} # Supported values: CPU, GPU, NPU
 DEVICE_STREAM_34=${3:-"GPU"} # Supported values: CPU, GPU, NPU
 MODEL_1=${4:-"yolov8s"} # Supported values: yolox-tiny, yolox_s, yolov7, yolov8s, yolov9c
 MODEL_2=${5:-"yolov8s"} # Supported values: yolox-tiny, yolox_s, yolov7, yolov8s, yolov9c
 OUTPUT=${6:-"file"} # Supported values: file, json
-GSTVA=${7:-"VA"} # Supported values: VA, VAAPI
+GSTVA="VA"
 
 cd "$(dirname "$0")"
@@ -91,13 +106,6 @@ if [ ! -f ${INPUT} ]; then
 fi
 
 SOURCE_ELEMENT="filesrc location=${INPUT}"
 
-### GSTVA , OUTPUT ##################################################################
-if [[ "$GSTVA" != "VA" ]] && [[ "$GSTVA" != "VAAPI" ]]; then
-    echo "Error: Wrong value for GSTVA parameter."
-    echo "Valid values: VA, VAAPI"
-    exit
-fi
-
 ### STREAM 1 and 2 ###
 # CPU device
 DECODE_ELEMENT_STR12="decodebin3"
@@ -114,16 +122,6 @@ if [[ "$GSTVA" == "VA" ]]; then
         PREPROC_BACKEND_STR12="${PREPROC_BACKEND_STR12} nireq=4 model-instance-id=inf0"
     fi
 fi
-## GST-VAAPI ##
-if [[ "$GSTVA" == "VAAPI" ]]; then
-    if [[ "$DEVICE_STREAM_12" == "GPU" ]]; then
-        PREPROC_BACKEND_STR12="vaapi-surface-sharing"
-    fi
-    if [[ "$DEVICE_STREAM_12" == "GPU" ]] || [[ "$DEVICE_STREAM_12" == "NPU" ]]; then
-        DECODE_ELEMENT_STR12+=" ! vaapipostproc ! video/x-raw(memory:VASurface)"
-        PREPROC_BACKEND_STR12="${PREPROC_BACKEND_STR12} nireq=4 model-instance-id=inf0"
-    fi
-fi
 
 ### STREAM 3 and 4 ###
 # CPU device
@@ -141,16 +139,6 @@ if [[ "$GSTVA" == "VA" ]]; then
         PREPROC_BACKEND_STR34="${PREPROC_BACKEND_STR34} nireq=4 model-instance-id=inf1"
     fi
 fi
-## GST-VAAPI ##
-if [[ "$GSTVA" == "VAAPI" ]]; then
-    if [[ "$DEVICE_STREAM_34" == "GPU" ]]; then
-        PREPROC_BACKEND_STR34="vaapi-surface-sharing"
-    fi
-    if [[ "$DEVICE_STREAM_34" == "GPU" ]] || [[ "$DEVICE_STREAM_34" == "NPU" ]]; then
-        DECODE_ELEMENT_STR34+=" ! vaapipostproc ! video/x-raw(memory:VASurface)"
-        PREPROC_BACKEND_STR34="${PREPROC_BACKEND_STR34} nireq=4 model-instance-id=inf1"
-    fi
-fi
 
 ## Output ##
 if [[ "$GSTVA" == "VA" ]]; then
@@ -162,10 +150,7 @@ if [[ "$GSTVA" == "VA" ]]; then
         echo "Error - VA-API H.264 encoder not found."
         exit
     fi
-    SINK_ELEMENT_BASE="gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! "
-fi
-if [[ "$GSTVA" == "VAAPI" ]]; then
-    SINK_ELEMENT_BASE="gvawatermark ! gvafpscounter ! vaapih264enc ! h264parse ! mp4mux ! "
+    SINK_ELEMENT_BASE="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! "
 fi
 
 if [[ "$OUTPUT" == "file" ]]; then
diff --git a/samples/gstreamer/gst_launch/vehicle_pedestrian_tracking/vehicle_pedestrian_tracking.sh b/samples/gstreamer/gst_launch/vehicle_pedestrian_tracking/vehicle_pedestrian_tracking.sh
index 52b6f4b8..e416882a 100755
--- a/samples/gstreamer/gst_launch/vehicle_pedestrian_tracking/vehicle_pedestrian_tracking.sh
+++ b/samples/gstreamer/gst_launch/vehicle_pedestrian_tracking/vehicle_pedestrian_tracking.sh
@@ -14,6 +14,20 @@ else
     echo "MODELS_PATH: $MODELS_PATH"
 fi
 
+# Print help message
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+    echo "Usage: $0 [INPUT] [DETECTION_INTERVAL] [DEVICE] [OUTPUT] [TRACKING_TYPE]"
+    echo ""
+    echo "Arguments:"
+    echo "  INPUT              - Input source (default: Pexels video URL)"
+    echo "  DETECTION_INTERVAL - Object detection interval (default: 3). 1 means detection every frame, 2 means detection every second frame, etc."
+    echo "  DEVICE             - Device for decode and inference in OpenVINO(TM) format (default: AUTO). Supported: AUTO, CPU, GPU, GPU.0"
+    echo "  OUTPUT             - Output type (default: display-async). Supported: display, display-async, fps, json, display-and-json, file"
+    echo "  TRACKING_TYPE      - Object tracking type (default: short-term-imageless). Supported: short-term-imageless, zero-term, zero-term-imageless"
+    echo ""
+    exit 0
+fi
+
 # Command-line parameters
 INPUT=${1:-https://github.com/intel-iot-devkit/sample-videos/raw/master/person-bicycle-car-detection.mp4} # Input file or URL
 DETECTION_INTERVAL=${2:-3} # Object detection interval: 1 means detection every frame, 2 means detection every second frame, etc.
@@ -46,9 +60,9 @@ else
 fi
 
 if [[ $OUTPUT == "display" ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink"
 elif [[ $OUTPUT == "display-async" ]]; then
-    SINK_ELEMENT="gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "fps" ]]; then
     SINK_ELEMENT="gvafpscounter ! fakesink async=false "
 elif [[ $OUTPUT == "json" ]]; then
@@ -56,7 +70,7 @@ elif [[ $OUTPUT == "json" ]]; then
     rm -f output.json
     SINK_ELEMENT="gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
 elif [[ $OUTPUT == "display-and-json" ]]; then
     rm -f output.json
-    SINK_ELEMENT="gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvametaconvert ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
 elif [[ $OUTPUT == "file" ]]; then
     FILE="$(basename ${INPUT%.*})"
     rm -f "vehicle_pedestrian_tracking_${FILE}_${DEVICE}.mp4"
@@ -68,7 +82,7 @@ elif [[ $OUTPUT == "file" ]]; then
        echo "Error - VA-API H.264 encoder not found."
        exit
    fi
-    SINK_ELEMENT="gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=vehicle_pedestrian_tracking_${FILE}_${DEVICE}.mp4"
+    SINK_ELEMENT="vapostproc ! gvawatermark ! gvafpscounter ! ${ENCODER} ! avimux name=mux ! filesink location=vehicle_pedestrian_tracking_${FILE}_${DEVICE}.mp4"
 else
     echo Error wrong value for OUTPUT parameter
     echo Valid values: "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json