#compdef ns-process-data

# AUTOMATICALLY GENERATED by `shtab`


# Offer the available ns-process-data subcommands as completion candidates.
# Each array entry is "name:description"; characters special to the zsh
# completion system (: ` > / [ ]) are backslash-escaped inside descriptions
# so they are displayed literally rather than interpreted.
_shtab_tyro_ns_process_data_commands() {
  local _commands=(
    "aria:Processes Project Aria data i.e. a VRS of the raw recording streams and the MPS attachments that provide poses, calibration, and 3d points. More information on MPS data can be found at\:"
    "images:Process images into a nerfstudio dataset."
    "metashape:Process Metashape data into a nerfstudio dataset. This script assumes that cameras have been aligned using Metashape. After alignment, it is necessary to export the camera poses as a \`.xml\` file. This option can be found under \`File \> Export \> Export Cameras\`."
    "odm:Process ODM data into a nerfstudio dataset. This script does the following\:"
    "polycam:Process Polycam data into a nerfstudio dataset. To capture data, use the Polycam app on an iPhone or iPad with LiDAR. The capture must be in LiDAR or ROOM mode. Developer mode must be enabled in the app settings, this will enable a raw data export option in the export menus. The exported data folder is used as the input to this script."
    "realitycapture:Process RealityCapture data into a nerfstudio dataset. This script assumes that cameras have been aligned using RealityCapture. After alignment, it is necessary to export the camera poses as a \`.csv\` file using the \`Internal\/External camera parameters\` option."
    "record3d:Process Record3D data into a nerfstudio dataset. This script does the following\:"
    "video:Process videos into a nerfstudio dataset. This script does the following\:"
  )
  # _describe presents the name:description pairs as a completion group.
  _describe 'ns-process-data commands' _commands
}

# Top-level option specs for `ns-process-data` itself (before a subcommand
# is chosen). Specs use the _arguments format:
#   {aliases}"[description]:message:action"
# The main completer below appends the subcommand selector to this array.
_shtab_tyro_ns_process_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# _arguments specs for the `aria` subcommand.
# Actions: `_files` completes any path; `_files -/` restricts to directories;
# an empty action after the trailing colon means "no completion, free text".
_shtab_tyro_ns_process_data_aria_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--vrs-file[Path to the VRS file. (required)]:vrs-file:_files"
  "--mps-data-dir[Path to Project Aria Machine Perception Services (MPS) attachments. (required)]:mps-data-dir:_files -/"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--max-frames[Number of frames to process. (default\: 350)]:max-frames:"
  "--max-output-size[Size of output images. We use the same for width\/height. (default\: 1408)]:max-output-size:"
)

# _arguments specs for the `images` subcommand.
# Conventions used below:
#   {--flag,--no-flag}  boolean toggles offered as a brace pair
#   (a b c)             fixed list of completion candidates for a value
#   _files / _files -/  complete file paths / directories only
# Some descriptions span multiple physical lines inside one quoted spec
# string; the embedded newlines are part of the help text.
_shtab_tyro_ns_process_data_images_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--camera-type[Camera model to use. (default\: perspective)]:camera-type:(perspective fisheye equirectangular pinhole simple_pinhole)"
  "--matching-method[Feature matching method to use. Vocab tree is recommended for a balance of speed
and accuracy. Exhaustive is slower but more accurate. Sequential is faster but
should only be used for videos. (default\: vocab_tree)]:matching-method:(exhaustive sequential vocab_tree)"
  "--sfm-tool[Structure from motion tool to use. Colmap will use sift features, hloc can use
many modern methods such as superpoint features and superglue matcher (default\: any)]:sfm-tool:(any colmap hloc)"
  {--refine-pixsfm,--no-refine-pixsfm}"[If True, runs refinement using Pixel Perfect SFM.
Only works with hloc sfm_tool (default\: False)]:refine-pixsfm:"
  {--refine-intrinsics,--no-refine-intrinsics}"[If True, do bundle adjustment to refine intrinsics.
Only works with colmap sfm_tool (default\: True)]:refine-intrinsics:"
  "--feature-type[Type of feature to use. (default\: any)]:feature-type:(any sift superpoint superpoint_aachen superpoint_max superpoint_inloc r2d2 d2net-ss sosnet disk)"
  "--matcher-type[Matching algorithm. (default\: any)]:matcher-type:(any NN superglue superglue-fast NN-superpoint NN-ratio NN-mutual adalam disk+lightglue superpoint+lightglue)"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3 will downscale the
images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  {--skip-colmap,--no-skip-colmap}"[If True, skips COLMAP and generates transforms.json if possible. (default\: False)]:skip-colmap:"
  {--skip-image-processing,--no-skip-image-processing}"[If True, skips copying and downscaling of images and only runs COLMAP if possible and enabled (default\: False)]:skip-image-processing:"
  "--colmap-model-path[Optionally sets the path of the colmap model. Used only when --skip-colmap is set to True. The path is relative
to the output directory. (default\: colmap\/sparse\/0)]:colmap-model-path:_files"
  "--colmap-cmd[How to call the COLMAP executable. (default\: colmap)]:colmap-cmd:"
  "--images-per-equirect[Number of samples per image to take from each equirectangular image.
Used only when camera-type is equirectangular. (default\: 8)]:images-per-equirect:(8 14)"
  "--crop-factor[Portion of the image to crop. All values should be in \[0,1\]. (top, bottom, left, right) (default\: 0.0 0.0 0.0 0.0)]:crop-factor:"
  "--crop-bottom[Portion of the image to crop from the bottom.
Can be used instead of \`crop-factor 0.0 \\\[num\] 0.0 0.0\` Should be in \[0,1\]. (default\: 0.0)]:crop-bottom:"
  {--gpu,--no-gpu}"[If True, use GPU. (default\: True)]:gpu:"
  {--use-sfm-depth,--no-use-sfm-depth}"[If True, export and use depth maps induced from SfM points. (default\: False)]:use-sfm-depth:"
  {--include-depth-debug,--no-include-depth-debug}"[If --use-sfm-depth and this flag is True, also export debug images showing Sf overlaid upon input images. (default\: False)]:include-depth-debug:"
  {--same-dimensions,--no-same-dimensions}"[Whether to assume all images are same dimensions and so to use fast downscaling with no autorotation. (default\: True)]:same-dimensions:"
  {--use-single-camera-mode,--no-use-single-camera-mode}"[Whether to assume all images taken with the same camera characteristics, set to False for multiple cameras in colmap (only works with hloc sfm_tool). (default\: True)]:use-single-camera-mode:"
  "--percent-radius-crop[Create circle crop mask. The radius is the percent of the image diagonal. (default\: 1.0)]:percent-radius-crop:"
)
# NOTE(review): "showing Sf overlaid" above looks like a truncated
# "SfM points" inherited from the upstream docstring that shtab rendered;
# fix in the generating source, not in this generated file — TODO confirm.

# _arguments specs for the `metashape` subcommand (Metashape-aligned cameras
# exported as an .xml file, with an optional .ply point export).
_shtab_tyro_ns_process_data_metashape_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--xml[Path to the Metashape xml file. (required)]:xml:_files"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--ply[Path to the Metashape point export ply file. (default\: None)]:ply:_files"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3
will downscale the images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  "--max-dataset-size[Max number of images to train on. If the dataset has more, images will be sampled approximately evenly. If -1,
use all images. (default\: 600)]:max-dataset-size:"
)

# _arguments specs for the `odm` (OpenDroneMap) subcommand.
_shtab_tyro_ns_process_data_odm_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3
will downscale the images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  "--max-dataset-size[Max number of images to train on. If the dataset has more, images will be sampled approximately evenly. If -1,
use all images. (default\: 600)]:max-dataset-size:"
)

# _arguments specs for the `polycam` subcommand (Polycam raw-data exports;
# includes blur filtering, border cropping, and optional depth-map use).
_shtab_tyro_ns_process_data_polycam_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3
will downscale the images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  {--use-uncorrected-images,--no-use-uncorrected-images}"[If True, use the raw images from the polycam export. If False, use the corrected images. (default\: False)]:use-uncorrected-images:"
  "--max-dataset-size[Max number of images to train on. If the dataset has more, images will be sampled approximately evenly. If -1,
use all images. (default\: 600)]:max-dataset-size:"
  "--min-blur-score[Minimum blur score to use an image. If the blur score is below this value, the image will be skipped. (default\: 25)]:min-blur-score:"
  "--crop-border-pixels[Number of pixels to crop from each border of the image. Useful as borders may be black due to undistortion. (default\: 15)]:crop-border-pixels:"
  {--use-depth,--no-use-depth}"[If True, processes the generated depth maps from Polycam (default\: False)]:use-depth:"
)

# _arguments specs for the `realitycapture` subcommand (camera poses exported
# from RealityCapture as a .csv, with an optional .ply point export).
_shtab_tyro_ns_process_data_realitycapture_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--csv[Path to the RealityCapture cameras CSV file. (required)]:csv:_files"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--ply[Path to the RealityCapture exported ply file (default\: None)]:ply:_files"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3
will downscale the images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  "--max-dataset-size[Max number of images to train on. If the dataset has more, images will be sampled approximately evenly. If -1,
use all images. (default\: 600)]:max-dataset-size:"
)

# _arguments specs for the `record3d` subcommand. Note its --max-dataset-size
# default (300) differs from the other subcommands' 600.
_shtab_tyro_ns_process_data_record3d_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--ply-dir[Path to the Record3D directory of point export ply files. (default\: None)]:ply-dir:_files -/"
  "--voxel-size[Voxel size for down sampling dense point cloud (default\: 0.8)]:voxel-size:"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3
will downscale the images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  "--max-dataset-size[Max number of images to train on. If the dataset has more, images will be sampled approximately evenly. If -1,
use all images. (default\: 300)]:max-dataset-size:"
)

# _arguments specs for the `video` subcommand. Largely mirrors the `images`
# specs, plus video-only options (--num-frames-target, --random-seed,
# --eval-random-seed) and a different --matching-method default (sequential).
_shtab_tyro_ns_process_data_video_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path the data, either a video file or a directory of images. (required)]:data:_files"
  "--output-dir[Path to the output directory. (required)]:output-dir:_files -/"
  "--eval-data[Path the eval data, either a video file or a directory of images. If set to None, the first will be used both for training and eval (default\: None)]:eval-data:_files"
  {--verbose,--no-verbose}"[If True, print extra logging. (default\: False)]:verbose:"
  "--camera-type[Camera model to use. (default\: perspective)]:camera-type:(perspective fisheye equirectangular pinhole simple_pinhole)"
  "--matching-method[Feature matching method to use. Vocab tree is recommended for a balance of speed
and accuracy. Exhaustive is slower but more accurate. Sequential is faster but
should only be used for videos. (default\: sequential)]:matching-method:(exhaustive sequential vocab_tree)"
  "--sfm-tool[Structure from motion tool to use. Colmap will use sift features, hloc can use
many modern methods such as superpoint features and superglue matcher (default\: any)]:sfm-tool:(any colmap hloc)"
  {--refine-pixsfm,--no-refine-pixsfm}"[If True, runs refinement using Pixel Perfect SFM.
Only works with hloc sfm_tool (default\: False)]:refine-pixsfm:"
  {--refine-intrinsics,--no-refine-intrinsics}"[If True, do bundle adjustment to refine intrinsics.
Only works with colmap sfm_tool (default\: True)]:refine-intrinsics:"
  "--feature-type[Type of feature to use. (default\: any)]:feature-type:(any sift superpoint superpoint_aachen superpoint_max superpoint_inloc r2d2 d2net-ss sosnet disk)"
  "--matcher-type[Matching algorithm. (default\: any)]:matcher-type:(any NN superglue superglue-fast NN-superpoint NN-ratio NN-mutual adalam disk+lightglue superpoint+lightglue)"
  "--num-downscales[Number of times to downscale the images. Downscales by 2 each time. For example a value of 3 will downscale the
images by 2x, 4x, and 8x. (default\: 3)]:num-downscales:"
  {--skip-colmap,--no-skip-colmap}"[If True, skips COLMAP and generates transforms.json if possible. (default\: False)]:skip-colmap:"
  {--skip-image-processing,--no-skip-image-processing}"[If True, skips copying and downscaling of images and only runs COLMAP if possible and enabled (default\: False)]:skip-image-processing:"
  "--colmap-model-path[Optionally sets the path of the colmap model. Used only when --skip-colmap is set to True. The path is relative
to the output directory. (default\: colmap\/sparse\/0)]:colmap-model-path:_files"
  "--colmap-cmd[How to call the COLMAP executable. (default\: colmap)]:colmap-cmd:"
  "--images-per-equirect[Number of samples per image to take from each equirectangular image.
Used only when camera-type is equirectangular. (default\: 8)]:images-per-equirect:(8 14)"
  "--crop-factor[Portion of the image to crop. All values should be in \[0,1\]. (top, bottom, left, right) (default\: 0.0 0.0 0.0 0.0)]:crop-factor:"
  "--crop-bottom[Portion of the image to crop from the bottom.
Can be used instead of \`crop-factor 0.0 \\\[num\] 0.0 0.0\` Should be in \[0,1\]. (default\: 0.0)]:crop-bottom:"
  {--gpu,--no-gpu}"[If True, use GPU. (default\: True)]:gpu:"
  {--use-sfm-depth,--no-use-sfm-depth}"[If True, export and use depth maps induced from SfM points. (default\: False)]:use-sfm-depth:"
  {--include-depth-debug,--no-include-depth-debug}"[If --use-sfm-depth and this flag is True, also export debug images showing Sf overlaid upon input images. (default\: False)]:include-depth-debug:"
  {--same-dimensions,--no-same-dimensions}"[Whether to assume all images are same dimensions and so to use fast downscaling with no autorotation. (default\: True)]:same-dimensions:"
  {--use-single-camera-mode,--no-use-single-camera-mode}"[Whether to assume all images taken with the same camera characteristics, set to False for multiple cameras in colmap (only works with hloc sfm_tool). (default\: True)]:use-single-camera-mode:"
  "--num-frames-target[Target number of frames to use per video, results may not be exact. (default\: 300)]:num-frames-target:"
  "--percent-radius-crop[Create circle crop mask. The radius is the percent of the image diagonal. (default\: 1.0)]:percent-radius-crop:"
  "--random-seed[Random seed to select video frames for training set (default\: None)]:random-seed:"
  "--eval-random-seed[Random seed to select video frames for eval set (default\: None)]:eval-random-seed:"
)


# Main completion entry point for `ns-process-data`.
# Completes the top-level options, then dispatches to the per-subcommand
# option arrays once a subcommand word has been typed.
_shtab_tyro_ns_process_data() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand selector and a catch-all rest-spec exactly once:
  # only if the options array does not already contain a '(-)*' or '(*)'
  # positional spec (the (I) subscript flag searches for a matching element;
  # ${(q)...} quotes the pattern so it is matched literally).
  if ((${_shtab_tyro_ns_process_data_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_process_data_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_process_data_options+=(': :_shtab_tyro_ns_process_data_commands' '*::: :->ns-process-data')
  fi
  _arguments -C -s $_shtab_tyro_ns_process_data_options

  # _arguments set state to 'ns-process-data' when completing words after
  # the subcommand; $line[1] holds the subcommand that was typed.
  case $state in
    ns-process-data)
      # Re-insert the subcommand word and bump CURRENT so the nested
      # _arguments call sees a full command line for the subcommand.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Rewrite the completion context to name the active subcommand.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_process_data-$line[1]:"
      case $line[1] in
        aria) _arguments -C -s $_shtab_tyro_ns_process_data_aria_options ;;
        images) _arguments -C -s $_shtab_tyro_ns_process_data_images_options ;;
        metashape) _arguments -C -s $_shtab_tyro_ns_process_data_metashape_options ;;
        odm) _arguments -C -s $_shtab_tyro_ns_process_data_odm_options ;;
        polycam) _arguments -C -s $_shtab_tyro_ns_process_data_polycam_options ;;
        realitycapture) _arguments -C -s $_shtab_tyro_ns_process_data_realitycapture_options ;;
        record3d) _arguments -C -s $_shtab_tyro_ns_process_data_record3d_options ;;
        video) _arguments -C -s $_shtab_tyro_ns_process_data_video_options ;;
      esac
  esac
}



# Associative array _arguments uses to record parsed option values.
typeset -A opt_args

# Register the completer. When this file is being eval'd/sourced, bind the
# function to the ns-process-data command for later; when zsh autoloaded it
# from $fpath (the normal #compdef path), run the completer immediately.
if [[ $zsh_eval_context[-1] == eval ]]; then
  # eval/source/. command, register function for later
  compdef _shtab_tyro_ns_process_data -N ns-process-data
else
  # autoload from fpath, call function directly
  _shtab_tyro_ns_process_data "$@"
fi

