#compdef ns-train

# AUTOMATICALLY GENERATED by `shtab`


_shtab_tyro_ns_train_commands() {
  # Top-level `ns-train` method subcommands. Each entry is "name:description";
  # backslash escapes are required by _describe's spec syntax.
  local _commands=(
    "BioNeRF:\[External, run \'ns-train BioNeRF\' to install\] BioNeRF. Nerfstudio implementation"
    "depth-nerfacto:Nerfacto with depth supervision."
    "dnerf:Dynamic-NeRF model. (slow)"
    "generfacto:Generative Text to NeRF model"
    "igs2gs:\[External, run \'ns-train igs2gs\' to install\] Instruct-GS2GS. Full model, used in paper"
    "in2n:\[External, run \'ns-train in2n\' to install\] Instruct-NeRF2NeRF. Full model, used in paper"
    "in2n-small:\[External, run \'ns-train in2n-small\' to install\] Instruct-NeRF2NeRF. Half precision model"
    "in2n-tiny:\[External, run \'ns-train in2n-tiny\' to install\] Instruct-NeRF2NeRF. Half precision with no LPIPS"
    "instant-ngp:Implementation of Instant-NGP. Recommended real-time model for unbounded scenes."
    "instant-ngp-bounded:Implementation of Instant-NGP. Recommended for bounded real and synthetic scenes"
    "kplanes:\[External, run \'ns-train kplanes\' to install\] K-Planes model tuned to static blender scenes"
    "kplanes-dynamic:\[External, run \'ns-train kplanes-dynamic\' to install\] K-Planes model tuned to dynamic DNeRF scenes"
    "lerf:\[External, run \'ns-train lerf\' to install\] LERF with OpenCLIP ViT-B\/16, used in paper"
    "lerf-big:\[External, run \'ns-train lerf-big\' to install\] LERF with OpenCLIP ViT-L\/14"
    "lerf-lite:\[External, run \'ns-train lerf-lite\' to install\] LERF with smaller network and less LERF samples"
    "mipnerf:High quality model for bounded scenes. (slow)"
    "nerfacto:Recommended real-time model tuned for real captures. This model will be continually updated."
    "nerfacto-big:"
    "nerfacto-huge:Larger version of Nerfacto with higher quality."
    "nerfgs:\[External, run \'ns-train nerfgs\' to install\] NeRFGS, used in paper"
    "nerfplayer-nerfacto:\[External, run \'ns-train nerfplayer-nerfacto\' to install\] NeRFPlayer with nerfacto backbone"
    "nerfplayer-ngp:\[External, run \'ns-train nerfplayer-ngp\' to install\] NeRFPlayer with instant-ngp-bounded backbone"
    "nerfsh:\[External, run \'ns-train nerfsh\' to install\] NeRF-SH, used in paper"
    "neus:Implementation of NeuS. (slow)"
    "neus-facto:Implementation of NeuS-Facto. (slow)"
    "phototourism:Uses the Phototourism data."
    "pynerf:\[External, run \'ns-train pynerf\' to install\] PyNeRF with proposal network. The default parameters are suited for outdoor scenes."
    "pynerf-occupancy-grid:\[External, run \'ns-train pynerf-occupancy-grid\' to install\] PyNeRF with occupancy grid. The default parameters are suited for synthetic scenes."
    "pynerf-synthetic:\[External, run \'ns-train pynerf-synthetic\' to install\] PyNeRF with proposal network. The default parameters are suited for synthetic scenes."
    "seathru-nerf:SeaThru-NeRF for underwater scenes."
    "seathru-nerf-lite:Light SeaThru-NeRF for underwater scenes."
    "semantic-nerfw:Predicts semantic segmentations and filters out transient objects."
    "signerf:\[External, run \'ns-train signerf\' to install\] SIGNeRF method (high quality) used in paper"
    "signerf_nerfacto:\[External, run \'ns-train signerf_nerfacto\' to install\] SIGNeRF method combined with Nerfacto (faster training less quality)"
    "splatfacto:Gaussian Splatting model"
    "splatfacto-big:Larger version of Splatfacto with higher quality."
    "splatfacto-w:\[External, run \'ns-train splatfacto-w\' to install\] Splatfacto in the wild"
    "tensorf:tensorf"
    "tetra-nerf:\[External, run \'ns-train tetra-nerf\' to install\] Tetra-NeRF. Different sampler - faster and better"
    "tetra-nerf-original:\[External, run \'ns-train tetra-nerf-original\' to install\] Tetra-NeRF. Official implementation from the paper"
    "vanilla-nerf:Original NeRF model. (slow)"
    "volinga:\[External, run \'ns-train volinga\' to install\] Real-time rendering model from Volinga. Directly exportable to NVOL format at https\:\/\/volinga.ai\/"
    "water:Water for underwater scenes."
    "water-big:Water big for underwater scenes."
    "water-ex:Water for underwater scenes."
    "water-splatting:Water-Splatting for underwater scenes."
    "water-splatting-big:Water-Splatting big for underwater scenes."
    "zipnerf:\[External, run \'ns-train zipnerf\' to install\] A pytorch implementation of \'Zip-NeRF\: Anti-Aliased Grid-Based Neural Radiance Fields\'"
  )
  _describe 'ns-train commands' _commands
}

_shtab_tyro_ns_train_depth_nerfacto_commands() {
  # Dataparser subcommands for `ns-train depth-nerfacto` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train depth-nerfacto commands' _commands
}

_shtab_tyro_ns_train_dnerf_commands() {
  # Dataparser subcommands for `ns-train dnerf` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train dnerf commands' _commands
}

_shtab_tyro_ns_train_instant_ngp_commands() {
  # Dataparser subcommands for `ns-train instant-ngp` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train instant-ngp commands' _commands
}

_shtab_tyro_ns_train_instant_ngp_bounded_commands() {
  # Dataparser subcommands for `ns-train instant-ngp-bounded` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train instant-ngp-bounded commands' _commands
}

_shtab_tyro_ns_train_mipnerf_commands() {
  # Dataparser subcommands for `ns-train mipnerf` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train mipnerf commands' _commands
}

_shtab_tyro_ns_train_nerfacto_commands() {
  # Dataparser subcommands for `ns-train nerfacto` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train nerfacto commands' _commands
}

_shtab_tyro_ns_train_nerfacto_big_commands() {
  # Dataparser subcommands for `ns-train nerfacto-big` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train nerfacto-big commands' _commands
}

_shtab_tyro_ns_train_nerfacto_huge_commands() {
  # Dataparser subcommands for `ns-train nerfacto-huge` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train nerfacto-huge commands' _commands
}

_shtab_tyro_ns_train_neus_commands() {
  # Dataparser subcommands for `ns-train neus` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train neus commands' _commands
}

_shtab_tyro_ns_train_neus_facto_commands() {
  # Dataparser subcommands for `ns-train neus-facto` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train neus-facto commands' _commands
}

_shtab_tyro_ns_train_phototourism_commands() {
  # Dataparser subcommands for `ns-train phototourism` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train phototourism commands' _commands
}

_shtab_tyro_ns_train_seathru_nerf_commands() {
  # Dataparser subcommands for `ns-train seathru-nerf` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train seathru-nerf commands' _commands
}

_shtab_tyro_ns_train_seathru_nerf_lite_commands() {
  # Dataparser subcommands for `ns-train seathru-nerf-lite` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train seathru-nerf-lite commands' _commands
}

_shtab_tyro_ns_train_semantic_nerfw_commands() {
  # Dataparser subcommands for `ns-train semantic-nerfw` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train semantic-nerfw commands' _commands
}

_shtab_tyro_ns_train_splatfacto_commands() {
  # Dataparser subcommands for `ns-train splatfacto` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train splatfacto commands' _commands
}

_shtab_tyro_ns_train_splatfacto_big_commands() {
  # Dataparser subcommands for `ns-train splatfacto-big` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train splatfacto-big commands' _commands
}

_shtab_tyro_ns_train_tensorf_commands() {
  # Dataparser subcommands for `ns-train tensorf` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train tensorf commands' _commands
}

_shtab_tyro_ns_train_vanilla_nerf_commands() {
  # Dataparser subcommands for `ns-train vanilla-nerf` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train vanilla-nerf commands' _commands
}

_shtab_tyro_ns_train_water_commands() {
  # Dataparser subcommands for `ns-train water` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train water commands' _commands
}

_shtab_tyro_ns_train_water_big_commands() {
  # Dataparser subcommands for `ns-train water-big` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train water-big commands' _commands
}

_shtab_tyro_ns_train_water_ex_commands() {
  # Dataparser subcommands for `ns-train water-ex` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train water-ex commands' _commands
}

_shtab_tyro_ns_train_water_splatting_commands() {
  # Dataparser subcommands for `ns-train water-splatting` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train water-splatting commands' _commands
}

_shtab_tyro_ns_train_water_splatting_big_commands() {
  # Dataparser subcommands for `ns-train water-splatting-big` (no descriptions).
  local -a _commands
  local _parser
  for _parser in \
    arkit-data blender-data colmap dnerf-data dycheck-data \
    instant-ngp-data minimal-parser nerfosr-data nerfstudio-data \
    nuscenes-data phototourism-data scannet-data scannetpp-data \
    sdfstudio-data sitcoms3d-data water-data
  do
    _commands+=("${_parser}:")
  done
  _describe 'ns-train water-splatting-big commands' _commands
}

# Option specs for the bare `ns-train` command (zsh _arguments format).
_shtab_tyro_ns_train_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# Option specs for `ns-train BioNeRF` (external method: only --help until installed).
_shtab_tyro_ns_train_BioNeRF_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

_shtab_tyro_ns_train_depth_nerfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: depth-nerfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 64)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 64)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 1000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 32)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 1.0)]:pipeline.model.average-init-density:"
  "--pipeline.model.depth-loss-mult[Lambda of the depth loss. (default\: 0.001)]:pipeline.model.depth-loss-mult:"
  "--pipeline.model.is-euclidean-depth[Whether input depth maps are Euclidean distances (or z-distances). (default\: False)]:pipeline.model.is-euclidean-depth:(True False)"
  "--pipeline.model.depth-sigma[Uncertainty around depth values in meters (defaults to 1cm). (default\: 0.01)]:pipeline.model.depth-sigma:"
  "--pipeline.model.should-decay-sigma[Whether to exponentially decay sigma. (default\: False)]:pipeline.model.should-decay-sigma:(True False)"
  "--pipeline.model.starting-depth-sigma[Starting uncertainty around depth values in meters (defaults to 0.2m). (default\: 0.2)]:pipeline.model.starting-depth-sigma:"
  "--pipeline.model.sigma-decay-rate[Rate of exponential decay. (default\: 0.99985)]:pipeline.model.sigma-decay-rate:"
  "--pipeline.model.depth-loss-type[Depth loss type. Note that \`PairPixelSampler\` has to be used for \`DepthLossType.SPARSENERF_RANKING\`
to work as expected. (default\: DS_NERF)]:pipeline.model.depth-loss-type:(DS_NERF URF SPARSENERF_RANKING)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# zsh _arguments specs for the ARKitScenes dataparser of `ns-train depth-nerfacto`.
# Each entry: "--flag[description]:tag:action" — empty action = free-form value,
# "(A B)" = fixed choices, "_files" = filename completion. Generated by shtab.
_shtab_tyro_ns_train_depth_nerfacto_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh _arguments specs for the Blender dataparser of `ns-train depth-nerfacto`.
# Descriptions may span multiple physical lines inside one quoted element.
_shtab_tyro_ns_train_depth_nerfacto_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh _arguments specs for the COLMAP dataparser of `ns-train depth-nerfacto`.
# ":" and "/" inside descriptions are backslash-escaped so zsh does not treat
# them as spec separators. Generated by shtab.
_shtab_tyro_ns_train_depth_nerfacto_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh _arguments specs for the D-NeRF dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh _arguments specs for the DyCheck (iPhone) dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# zsh _arguments specs for the Instant-NGP dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# zsh _arguments specs for the minimal dataparser of `ns-train depth-nerfacto`.
# NOTE(review): default data path embeds a developer's home directory — an
# artifact of the machine this file was generated on; regenerate to change it.
_shtab_tyro_ns_train_depth_nerfacto_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh _arguments specs for the NeRF-OSR dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for the Nerfstudio dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh _arguments specs for the NuScenes dataparser of `ns-train depth-nerfacto`.
# "_files -/" restricts completion to directories.
_shtab_tyro_ns_train_depth_nerfacto_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh _arguments specs for the Phototourism dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for the ScanNet dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh _arguments specs for the ScanNet++ dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh _arguments specs for the SDFStudio dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# zsh _arguments specs for the Sitcoms3D dataparser of `ns-train depth-nerfacto`.
_shtab_tyro_ns_train_depth_nerfacto_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# zsh _arguments specs for the water-scene dataparser of `ns-train depth-nerfacto`.
# Superset of the COLMAP dataparser options plus semantics-related flags.
_shtab_tyro_ns_train_depth_nerfacto_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

_shtab_tyro_ns_train_dnerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: dnerf)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: wandb)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 1000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 1000000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 1024)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 1024)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-coarse-samples[Number of samples in coarse field evaluation (default\: 64)]:pipeline.model.num-coarse-samples:"
  "--pipeline.model.num-importance-samples[Number of samples in fine field evaluation (default\: 128)]:pipeline.model.num-importance-samples:"
  "--pipeline.model.enable-temporal-distortion[Specifies whether or not to include ray warping based on time. (default\: True)]:pipeline.model.enable-temporal-distortion:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: white)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.temporal-distortion-params.kind[(default\: DNERF)]:pipeline.model.temporal-distortion-params.kind:(DNERF)"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.temporal-distortion.scheduler[(default\: None)]:optimizers.temporal-distortion.scheduler:(None)"
  "--optimizers.temporal-distortion.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.temporal-distortion.optimizer.lr:"
  "--optimizers.temporal-distortion.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.temporal-distortion.optimizer.eps:"
  "--optimizers.temporal-distortion.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.temporal-distortion.optimizer.max-norm:"
  "--optimizers.temporal-distortion.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.temporal-distortion.optimizer.weight-decay:"
)

# Completion specs for `ns-train dnerf arkit-data`: each entry maps a CLI flag
# to a pipeline.datamanager.dataparser.* setting (zsh `_arguments` format).
# NOTE(review): auto-generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_dnerf_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Completion specs for `ns-train dnerf blender-data` (auto-generated by shtab).
# Multi-line quoted entries are single zsh words; the embedded newlines come
# from the generator and are preserved verbatim.
_shtab_tyro_ns_train_dnerf_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Completion specs for `ns-train dnerf colmap` (auto-generated by shtab).
# Flags mirror the ColmapDataParser config fields under
# pipeline.datamanager.dataparser.*; path-valued flags complete with `_files`.
_shtab_tyro_ns_train_dnerf_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Completion specs for `ns-train dnerf dnerf-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Completion specs for `ns-train dnerf dycheck-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Completion specs for `ns-train dnerf instant-ngp-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Completion specs for `ns-train dnerf minimal-parser` (auto-generated by shtab).
# NOTE(review): the default path below is an absolute path from the generating
# machine (\/home\/nikhil\/...) — confirm this is intended upstream in the generator.
_shtab_tyro_ns_train_dnerf_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Completion specs for `ns-train dnerf nerfosr-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Completion specs for `ns-train dnerf nerfstudio-data` (auto-generated by shtab).
# Enumerated choices in trailing (...) groups become zsh completion candidates.
_shtab_tyro_ns_train_dnerf_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Completion specs for `ns-train dnerf nuscenes-data` (auto-generated by shtab).
# Directory-valued flags use `_files -/` to complete directories only.
_shtab_tyro_ns_train_dnerf_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Completion specs for `ns-train dnerf phototourism-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Completion specs for `ns-train dnerf scannet-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Completion specs for `ns-train dnerf scannetpp-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Completion specs for `ns-train dnerf sdfstudio-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Completion specs for `ns-train dnerf sitcoms3d-data` (auto-generated by shtab).
_shtab_tyro_ns_train_dnerf_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for the `ns-train dnerf-water-data` dataparser subcommand.
# Each element is a zsh `_arguments` spec: "FLAG[description]:message:action".
# NOTE: generated by shtab from the Python CLI; fixed one help-text typo
# ("Image width" -> "image width") relative to the generator output.
_shtab_tyro_ns_train_dnerf_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for the `ns-train generfacto` (Generative Text-to-NeRF) subcommand.
# Each element is a zsh `_arguments` spec: "FLAG[description]:message:action".
# NOTE: generated by shtab from the Python CLI; fixed one help-text typo
# ("multipier" -> "multiplier") relative to the generator output.
_shtab_tyro_ns_train_generfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: generfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: \'\')]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 200)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 50)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 50)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-resolution[Training resolution (default\: 64)]:pipeline.datamanager.train-resolution:"
  "--pipeline.datamanager.eval-resolution[Evaluation resolution (default\: 64)]:pipeline.datamanager.eval-resolution:"
  "--pipeline.datamanager.num-eval-angles[Number of evaluation angles (default\: 256)]:pipeline.datamanager.num-eval-angles:"
  "--pipeline.datamanager.train-images-per-batch[Number of images per batch for training (default\: 1)]:pipeline.datamanager.train-images-per-batch:"
  "--pipeline.datamanager.eval-images-per-batch[Number of images per batch for evaluation (default\: 1)]:pipeline.datamanager.eval-images-per-batch:"
  "--pipeline.datamanager.radius-mean[Mean radius of camera orbit (default\: 2.5)]:pipeline.datamanager.radius-mean:"
  "--pipeline.datamanager.radius-std[Std of radius of camera orbit (default\: 0.1)]:pipeline.datamanager.radius-std:"
  "--pipeline.datamanager.focal-range[Range of focal length (default\: 0.7 1.35)]:pipeline.datamanager.focal-range:"
  "--pipeline.datamanager.vertical-rotation-range[Range of vertical rotation (default\: -90 0)]:pipeline.datamanager.vertical-rotation-range:"
  "--pipeline.datamanager.jitter-std[Std of camera direction jitter, so we don\'t just point the cameras towards the center every time (default\: 0.05)]:pipeline.datamanager.jitter-std:"
  "--pipeline.datamanager.center[Center coordinate of the camera sphere (default\: 0 0 0)]:pipeline.datamanager.center:"
  "--pipeline.datamanager.horizontal-rotation-warmup[How many steps until the full horizontal rotation range is used (default\: 3000)]:pipeline.datamanager.horizontal-rotation-warmup:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[prompt for stable dreamfusion (default\: \'a high quality photo of a ripe pineapple\')]:pipeline.model.prompt:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: white)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.001 10.0)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.orientation-loss-mult-range[number of iterations to reach last orientation_loss_mult value (default\: 0 15000)]:pipeline.model.orientation-loss-mult-range:"
  "--pipeline.model.random-light-source[Randomizes light source per output. (default\: True)]:pipeline.model.random-light-source:(True False)"
  "--pipeline.model.initialize-density[Initialize density in center of scene. (default\: True)]:pipeline.model.initialize-density:(True False)"
  "--pipeline.model.taper-range[Range of step values for the density tapering (default\: 0 2000)]:pipeline.model.taper-range:"
  "--pipeline.model.taper-strength[Strength schedule of center density (default\: 1.0 0.0)]:pipeline.model.taper-strength:"
  "--pipeline.model.sphere-collider[Use spherical collider instead of box (default\: True)]:pipeline.model.sphere-collider:(True False)"
  "--pipeline.model.random-background[Randomly choose between using background mlp and random color for background (default\: True)]:pipeline.model.random-background:(True False)"
  "--pipeline.model.target-transmittance-start[target transmittance for opacity penalty. This is the percent of the scene that is
background when rendered at the start of training (default\: 0.4)]:pipeline.model.target-transmittance-start:"
  "--pipeline.model.target-transmittance-end[target transmittance for opacity penalty. This is the percent of the scene that is
background when rendered at the end of training (default\: 0.7)]:pipeline.model.target-transmittance-end:"
  "--pipeline.model.transmittance-end-schedule[number of iterations to reach target_transmittance_end (default\: 1500)]:pipeline.model.transmittance-end-schedule:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 0)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 2000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 2000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 100.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 1.0)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.start-normals-training[Start training normals after this many iterations (default\: 2000)]:pipeline.model.start-normals-training:"
  "--pipeline.model.start-lambertian-training[start training with lambertian shading after this many iterations (default\: 500)]:pipeline.model.start-lambertian-training:"
  "--pipeline.model.opacity-penalty[enables penalty to encourage sparse weights (penalizing for uniform density along ray) (default\: True)]:pipeline.model.opacity-penalty:(True False)"
  "--pipeline.model.opacity-loss-mult[scale for opacity penalty (default\: 0.001)]:pipeline.model.opacity-loss-mult:"
  "--pipeline.model.max-res[Maximum resolution of the density field. (default\: 256)]:pipeline.model.max-res:"
  "--pipeline.model.location-based-prompting[enables location based prompting (default\: True)]:pipeline.model.location-based-prompting:(True False)"
  "--pipeline.model.interpolated-prompting[enables interpolated location prompting (default\: False)]:pipeline.model.interpolated-prompting:(True False)"
  "--pipeline.model.positional-prompting[how to incorporate position into prompt (default\: discrete)]:pipeline.model.positional-prompting:(discrete interpolated off)"
  "--pipeline.model.top-prompt[appended to prompt for overhead view (default\: \', overhead view\')]:pipeline.model.top-prompt:"
  "--pipeline.model.side-prompt[appended to prompt for side view (default\: \', side view\')]:pipeline.model.side-prompt:"
  "--pipeline.model.front-prompt[appended to prompt for front view (default\: \', front view\')]:pipeline.model.front-prompt:"
  "--pipeline.model.back-prompt[appended to prompt for back view (default\: \', back view\')]:pipeline.model.back-prompt:"
  "--pipeline.model.guidance-scale[guidance scale for sds loss (default\: 25)]:pipeline.model.guidance-scale:"
  "--pipeline.model.diffusion-device[device for diffusion model (default\: None)]:pipeline.model.diffusion-device:"
  "--pipeline.model.diffusion-model[diffusion model for SDS loss (default\: deepfloyd)]:pipeline.model.diffusion-model:(stablediffusion deepfloyd)"
  "--pipeline.model.sd-version[model version when using stable diffusion (default\: 1-5)]:pipeline.model.sd-version:"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
)

# Option specs for `ns-train igs2gs` (external method stub: help flags only).
# The two entries below are the explicit expansion of {-h,--help}"...".
_shtab_tyro_ns_train_igs2gs_options=(
  "-h[show this help message and exit]:help:"
  "--help[show this help message and exit]:help:"
)

# Option specs for `ns-train in2n` (external method stub: help flags only).
# The two entries below are the explicit expansion of {-h,--help}"...".
_shtab_tyro_ns_train_in2n_options=(
  "-h[show this help message and exit]:help:"
  "--help[show this help message and exit]:help:"
)

# Option specs for `ns-train in2n-small` (external method stub: help flags only).
# The two entries below are the explicit expansion of {-h,--help}"...".
_shtab_tyro_ns_train_in2n_small_options=(
  "-h[show this help message and exit]:help:"
  "--help[show this help message and exit]:help:"
)

# Option specs for `ns-train in2n-tiny` (external method stub: help flags only).
# The two entries below are the explicit expansion of {-h,--help}"...".
_shtab_tyro_ns_train_in2n_tiny_options=(
  "-h[show this help message and exit]:help:"
  "--help[show this help message and exit]:help:"
)

_shtab_tyro_ns_train_instant_ngp_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: instant-ngp)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 4096)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.target-num-samples[The target number of samples to use for an entire batch of rays. (default\: 262144)]:pipeline.target-num-samples:"
  "--pipeline.max-num-samples-per-ray[The maximum number of samples to be placed along a ray. (default\: 1024)]:pipeline.max-num-samples-per-ray:"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: False)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[Instant NGP doesn\'t use a collider. (default\: None)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 8192)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.grid-resolution[Resolution of the grid used for the field. (default\: 128)]:pipeline.model.grid-resolution:"
  "--pipeline.model.grid-levels[Levels of the grid used for the field. (default\: 4)]:pipeline.model.grid-levels:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.alpha-thre[Threshold for opacity skipping. (default\: 0.01)]:pipeline.model.alpha-thre:"
  "--pipeline.model.cone-angle[Should be set to 0.0 for blender scenes but 1.\/256 for real scenes. (default\: 0.004)]:pipeline.model.cone-angle:"
  "--pipeline.model.render-step-size[Minimum step size for rendering. (default\: None)]:pipeline.model.render-step-size:"
  "--pipeline.model.near-plane[How far along ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: False)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.background-color[The color that is given to masked areas.
These areas are used to force the density in those regions to be zero. (default\: random)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 200000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
)

# _arguments specs for `ns-train instant-ngp arkit-data`: ARKitScenes
# dataparser options. Auto-generated by shtab from the tyro CLI definition —
# regenerate rather than hand-editing the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# _arguments specs for `ns-train instant-ngp blender-data`: Blender synthetic
# dataset dataparser options. Auto-generated by shtab; do not hand-edit the
# quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# _arguments specs for the `ns-train instant-ngp-bounded` method: the full set
# of trainer, machine, logging, viewer, pipeline, and optimizer flags.
# Auto-generated by shtab from the tyro CLI definition — regenerate rather
# than hand-editing; each entry is "--flag[description]:metavar:(choices)".
_shtab_tyro_ns_train_instant_ngp_bounded_options=(
  {-h,--help}"[show this help message and exit]:help:"
  # --- top-level trainer / experiment options ---
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: instant-ngp-bounded)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  # --- machine.* options (seed, devices, DDP) ---
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  # --- logging.* options ---
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  # --- viewer.* options ---
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 4096)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  # --- pipeline.* options (datamanager, pixel sampler, model) ---
  "--pipeline.target-num-samples[The target number of samples to use for an entire batch of rays. (default\: 262144)]:pipeline.target-num-samples:"
  "--pipeline.max-num-samples-per-ray[The maximum number of samples to be placed along a ray. (default\: 1024)]:pipeline.max-num-samples-per-ray:"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 8192)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 1024)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: False)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[Instant NGP doesn\'t use a collider. (default\: None)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 8192)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.grid-resolution[Resolution of the grid used for the field. (default\: 128)]:pipeline.model.grid-resolution:"
  "--pipeline.model.grid-levels[Levels of the grid used for the field. (default\: 1)]:pipeline.model.grid-levels:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.alpha-thre[Threshold for opacity skipping. (default\: 0.0)]:pipeline.model.alpha-thre:"
  "--pipeline.model.cone-angle[Should be set to 0.0 for blender scenes but 1.\/256 for real scenes. (default\: 0.0)]:pipeline.model.cone-angle:"
  "--pipeline.model.render-step-size[Minimum step size for rendering. (default\: None)]:pipeline.model.render-step-size:"
  "--pipeline.model.near-plane[How far along ray to start sampling. (default\: 0.01)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: False)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.background-color[The color that is given to masked areas.
These areas are used to force the density in those regions to be zero. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: True)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  # --- optimizers.* options ---
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 200000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
)

# _arguments specs for `ns-train instant-ngp-bounded arkit-data`: ARKitScenes
# dataparser options. Auto-generated by shtab; do not hand-edit the quoted
# spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# _arguments specs for `ns-train instant-ngp-bounded blender-data`: Blender
# synthetic dataset dataparser options. Auto-generated by shtab; do not
# hand-edit the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# _arguments specs for `ns-train instant-ngp-bounded colmap`: COLMAP
# reconstruction dataparser options (paths, pose normalization, train/eval
# splitting). Auto-generated by shtab; do not hand-edit the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# _arguments specs for `ns-train instant-ngp-bounded dnerf-data`: D-NeRF
# dataset dataparser options. Auto-generated by shtab; do not hand-edit the
# quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# _arguments specs for `ns-train instant-ngp-bounded dycheck-data`: DyCheck
# (iPhone) dataset dataparser options. Auto-generated by shtab; do not
# hand-edit the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# _arguments specs for `ns-train instant-ngp-bounded instant-ngp-data`:
# Instant-NGP format dataparser options. Auto-generated by shtab; do not
# hand-edit the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments specs for `ns-train instant-ngp-bounded minimal-parser`.
# NOTE(review): the default below is an absolute path from the machine that
# generated this file (\/home\/nikhil\/...) — it leaked out of the generator's
# environment and is almost certainly wrong for other users. Fix by
# regenerating with shtab, not by hand-editing the spec string.
_shtab_tyro_ns_train_instant_ngp_bounded_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments specs for `ns-train instant-ngp-bounded nerfosr-data`: NeRF-OSR
# dataset dataparser options. Auto-generated by shtab; do not hand-edit the
# quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for `ns-train instant-ngp-bounded nerfstudio-data`: the
# native nerfstudio-format dataparser options. Auto-generated by shtab; do
# not hand-edit the quoted spec strings.
_shtab_tyro_ns_train_instant_ngp_bounded_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments option specs for `ns-train instant-ngp-bounded nuscenes-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments option specs for `ns-train instant-ngp-bounded phototourism-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments option specs for `ns-train instant-ngp-bounded scannet-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments option specs for `ns-train instant-ngp-bounded scannetpp-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments option specs for `ns-train instant-ngp-bounded sdfstudio-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments option specs for `ns-train instant-ngp-bounded sitcoms3d-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_bounded_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments option specs for `ns-train instant-ngp-bounded water-data`
# (shtab-generated; regenerate rather than hand-editing). Multi-line help
# strings intentionally contain embedded newlines inside one quoted element.
_shtab_tyro_ns_train_instant_ngp_bounded_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments option specs for `ns-train instant-ngp colmap`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# _arguments option specs for `ns-train instant-ngp dnerf-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# _arguments option specs for `ns-train instant-ngp dycheck-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# _arguments option specs for `ns-train instant-ngp instant-ngp-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments option specs for `ns-train instant-ngp minimal-parser`
# (shtab-generated; note the default path is a machine-specific artifact of
# the generating environment — regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments option specs for `ns-train instant-ngp nerfosr-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments option specs for `ns-train instant-ngp nerfstudio-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments option specs for `ns-train instant-ngp nuscenes-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments option specs for `ns-train instant-ngp phototourism-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments option specs for `ns-train instant-ngp scannet-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments option specs for `ns-train instant-ngp scannetpp-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments option specs for `ns-train instant-ngp sdfstudio-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments option specs for `ns-train instant-ngp sitcoms3d-data`
# (shtab-generated; regenerate rather than hand-editing).
_shtab_tyro_ns_train_instant_ngp_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# zsh _arguments option specs for `ns-train instant-ngp water-data`
# (colmap-style dataparser options; entries spanning two lines are single
# quoted strings containing embedded newlines in the help text).
_shtab_tyro_ns_train_instant_ngp_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# `ns-train kplanes` is an external method (per the command list: run it to
# install); only -h/--help is generated for its completion.
_shtab_tyro_ns_train_kplanes_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train kplanes-dynamic` is an external method; only -h/--help is
# generated for its completion.
_shtab_tyro_ns_train_kplanes_dynamic_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train lerf` is an external method; only -h/--help is generated for its
# completion.
_shtab_tyro_ns_train_lerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train lerf-big` is an external method; only -h/--help is generated for
# its completion.
_shtab_tyro_ns_train_lerf_big_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train lerf-lite` is an external method; only -h/--help is generated for
# its completion.
_shtab_tyro_ns_train_lerf_lite_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# zsh _arguments option specs for the built-in `ns-train mipnerf` method.
# Entry format (shtab-generated): "--flag[description]:message:action" where
# the action is empty (free text), `_files` (path completion, `-/` = dirs
# only), or a parenthesized literal-choice list. Section comments below group
# the dotted option namespaces; the data lines themselves are untouched
# generated output.
_shtab_tyro_ns_train_mipnerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
  # Top-level trainer / experiment options.
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: mipnerf)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: wandb)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 1000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 1000000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  # machine.* — device / distributed-training configuration.
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  # logging.* — logging and profiler configuration.
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  # viewer.* — web viewer configuration.
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  # pipeline.datamanager.* — data loading and ray/pixel sampling.
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 1024)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 1024)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.num-processes[Number of processes to use for train data loading. More than 1 doesn\'t result in that much better performance (default\: 1)]:pipeline.datamanager.num-processes:"
  "--pipeline.datamanager.queue-size[Size of shared data queue containing generated ray bundles and batches.
If queue_size \<\= 0, the queue size is infinite. (default\: 2)]:pipeline.datamanager.queue-size:"
  "--pipeline.datamanager.max-thread-workers[Maximum number of threads to use in thread pool executor. If None, use ThreadPool default. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  # pipeline.model.* — mipnerf model configuration.
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 1024)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-coarse-samples[Number of samples in coarse field evaluation (default\: 128)]:pipeline.model.num-coarse-samples:"
  "--pipeline.model.num-importance-samples[Number of samples in fine field evaluation (default\: 128)]:pipeline.model.num-importance-samples:"
  "--pipeline.model.enable-temporal-distortion[Specifies whether or not to include ray warping based on time. (default\: False)]:pipeline.model.enable-temporal-distortion:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: white)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 0.1)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.temporal-distortion-params.kind[(default\: DNERF)]:pipeline.model.temporal-distortion-params.kind:(DNERF)"
  # optimizers.* — optimizer / scheduler configuration.
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
)

# zsh _arguments option specs for `ns-train mipnerf arkit-data` (ARKitScenes
# dataparser).
_shtab_tyro_ns_train_mipnerf_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh _arguments option specs for `ns-train mipnerf blender-data` (synthetic
# Blender dataset parser).
_shtab_tyro_ns_train_mipnerf_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh _arguments option specs for `ns-train mipnerf colmap` (COLMAP
# reconstruction dataparser; same option set as the water-data parser above
# minus the semantics options).
_shtab_tyro_ns_train_mipnerf_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh _arguments option specs for `ns-train mipnerf dnerf-data` (D-NeRF
# dataset parser).
_shtab_tyro_ns_train_mipnerf_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh _arguments option specs for `ns-train mipnerf dycheck-data` (DyCheck
# iPhone dataset parser).
_shtab_tyro_ns_train_mipnerf_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# zsh _arguments option specs for `ns-train mipnerf instant-ngp-data`
# (Instant-NGP-format dataset parser).
_shtab_tyro_ns_train_mipnerf_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# zsh _arguments option specs for `ns-train mipnerf minimal-parser`.
# NOTE(review): the default path below embeds the generator's local home
# directory — an artifact of where this completion was generated; consider
# regenerating from a clean checkout.
_shtab_tyro_ns_train_mipnerf_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh _arguments option specs for `ns-train mipnerf nerfosr-data` (NeRF-OSR
# dataset parser).
_shtab_tyro_ns_train_mipnerf_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments option specs for `ns-train mipnerf nerfstudio-data` (the
# default nerfstudio-format dataset parser).
_shtab_tyro_ns_train_mipnerf_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh _arguments option specs for `ns-train mipnerf nuscenes-data` (NuScenes
# driving dataset parser).
_shtab_tyro_ns_train_mipnerf_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh _arguments option specs for `ns-train mipnerf phototourism-data`
# (Phototourism dataset parser).
_shtab_tyro_ns_train_mipnerf_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments option specs for `ns-train mipnerf scannet-data` (ScanNet
# dataset parser).
_shtab_tyro_ns_train_mipnerf_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Completion specs for the `scannetpp-data` dataparser flags of `ns-train mipnerf`.
# zsh `_arguments` spec strings ("--flag[help]:tag:action"); `_files -/` restricts
# completion to directories. Auto-generated by shtab — do not hand-edit.
_shtab_tyro_ns_train_mipnerf_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Completion specs for the `sdfstudio-data` dataparser flags of `ns-train mipnerf`.
# zsh `_arguments` spec strings; note the --scene-scale entry intentionally spans
# two physical lines — the embedded newline is part of its help text. Auto-generated
# by shtab — do not hand-edit.
_shtab_tyro_ns_train_mipnerf_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Completion specs for the `sitcoms3d-data` dataparser flags of `ns-train mipnerf`.
# zsh `_arguments` spec strings; the --scene-scale help text contains an embedded
# newline that spans two physical lines. Auto-generated by shtab — do not hand-edit.
_shtab_tyro_ns_train_mipnerf_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Completion specs for the `water-data` dataparser flags of `ns-train mipnerf`.
# zsh `_arguments` spec strings ("--flag[help]:tag:action"). Several help texts
# contain embedded newlines and therefore span multiple physical lines; those
# newlines are part of the spec string. Auto-generated by shtab — do not hand-edit.
_shtab_tyro_ns_train_mipnerf_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Completion specs for the top-level flags of `ns-train nerfacto`: trainer/output
# settings, machine/logging/viewer groups, pipeline (datamanager + model) options,
# and per-optimizer (proposal-networks, fields, camera-opt) optimizer/scheduler
# options. Each entry is a zsh `_arguments` spec: "--flag[help]:tag:action", where
# the action is `_files` (or `_files -/` for directories), a `(a b c)` choice list,
# or empty for free-form values. Some help texts contain embedded newlines and span
# multiple physical lines. Auto-generated by shtab — regenerate, do not hand-edit.
_shtab_tyro_ns_train_nerfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: nerfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.num-processes[Number of processes to use for train data loading. More than 1 doesn\'t result in that much better performance (default\: 1)]:pipeline.datamanager.num-processes:"
  "--pipeline.datamanager.queue-size[Size of shared data queue containing generated ray bundles and batches.
If queue_size \<\= 0, the queue size is infinite. (default\: 2)]:pipeline.datamanager.queue-size:"
  "--pipeline.datamanager.max-thread-workers[Maximum number of threads to use in thread pool executor. If None, use ThreadPool default. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 64)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 64)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 1000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 32)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 0.01)]:pipeline.model.average-init-density:"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.proposal-networks.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.proposal-networks.scheduler.lr-pre-warmup:"
  "--optimizers.proposal-networks.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.proposal-networks.scheduler.lr-final:"
  "--optimizers.proposal-networks.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.proposal-networks.scheduler.warmup-steps:"
  "--optimizers.proposal-networks.scheduler.max-steps[The maximum number of steps. (default\: 200000)]:optimizers.proposal-networks.scheduler.max-steps:"
  "--optimizers.proposal-networks.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.proposal-networks.scheduler.ramp:(linear cosine)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 200000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# Completion specs for the `arkit-data` dataparser flags of `ns-train nerfacto`.
# zsh `_arguments` spec strings ("--flag[help]:tag:action"); auto-generated by
# shtab — do not hand-edit the option strings.
_shtab_tyro_ns_train_nerfacto_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh _arguments specs for the `ns-train nerfacto-big` method: the full set of
# trainer / machine / logging / viewer / pipeline / optimizer flags. Element
# format is "--flag[description]:message:action"; `_files` completes paths
# (`_files -/` directories only), "(A B)" offers fixed choices, empty action
# means free-form input. Descriptions embed the Python-side defaults.
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_options=(
  {-h,--help}"[show this help message and exit]:help:"
  # -- top-level trainer/experiment options --
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: nerfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 100000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  # -- machine.* (device / DDP) options --
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  # -- logging.* options (some descriptions span multiple physical lines) --
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  # -- viewer.* options --
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  # -- pipeline.datamanager.* options --
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 8192)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.num-processes[Number of processes to use for train data loading. More than 1 doesn\'t result in that much better performance (default\: 1)]:pipeline.datamanager.num-processes:"
  "--pipeline.datamanager.queue-size[Size of shared data queue containing generated ray bundles and batches.
If queue_size \<\= 0, the queue size is infinite. (default\: 2)]:pipeline.datamanager.queue-size:"
  "--pipeline.datamanager.max-thread-workers[Maximum number of threads to use in thread pool executor. If None, use ThreadPool default. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  # -- pipeline.model.* options --
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 128)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 128)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 4096)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 21)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 512 256)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 128)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 5000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 128)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 0.01)]:pipeline.model.average-init-density:"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  # -- optimizers.* (per-param-group optimizer + scheduler) options --
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 50000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# zsh _arguments specs for `ns-train nerfacto-big arkit-data`: dataparser flags
# for ARKitScenes input (same flag set as the nerfacto variant above, emitted
# once per method by the generator).
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh _arguments specs for `ns-train nerfacto-big blender-data`: dataparser
# flags for Blender-synthetic scenes. Multi-line descriptions continue on the
# following physical line inside the same quoted element.
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh _arguments specs for `ns-train nerfacto-big colmap`: dataparser flags for
# COLMAP reconstructions (paths, downscaling, pose orientation/centering,
# train/eval split, and 3D-point loading).
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh _arguments specs for `ns-train nerfacto-big dnerf-data`: dataparser flags
# for D-NeRF synthetic scenes.
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh _arguments specs for `ns-train nerfacto-big dycheck-data`: dataparser
# flags for DyCheck iPhone captures.
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# zsh _arguments specs for `ns-train nerfacto-big instant-ngp-data`: dataparser
# flags for Instant-NGP-format datasets (scene scaling and train/eval split).
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# zsh _arguments specs for `ns-train nerfacto-big minimal-parser`.
# NOTE(review): the default below embeds an absolute home-directory path baked
# in at generation time -- presumably from the generating machine; regenerate
# the file to refresh it rather than editing by hand.
_shtab_tyro_ns_train_nerfacto_big_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh _arguments specs for `ns-train nerfacto-big nerfosr-data`: dataparser
# flags for NeRF-OSR outdoor scene relighting data.
# NOTE(review): shtab-generated -- regenerate instead of hand-editing.
_shtab_tyro_ns_train_nerfacto_big_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# `_arguments` specs for the `nerfstudio-data` dataparser of `ns-train
# nerfacto-big`. Multi-line descriptions (e.g. --eval-mode) embed literal
# newlines inside one quoted array element — keep them intact. shtab-generated.
_shtab_tyro_ns_train_nerfacto_big_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# `_arguments` specs for the `nuscenes-data` dataparser of `ns-train
# nerfacto-big`. `_files -/` restricts completion to directories. shtab-generated.
_shtab_tyro_ns_train_nerfacto_big_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# `_arguments` specs for the `phototourism-data` dataparser of `ns-train
# nerfacto-big` (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_big_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# `_arguments` specs for the `scannet-data` dataparser of `ns-train
# nerfacto-big` (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_big_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# `_arguments` specs for the `scannetpp-data` dataparser of `ns-train
# nerfacto-big` (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_big_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# `_arguments` specs for the `sdfstudio-data` dataparser of `ns-train
# nerfacto-big`. --scene-scale spans two source lines via an embedded newline
# inside one quoted element — keep it intact. shtab-generated.
_shtab_tyro_ns_train_nerfacto_big_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# `_arguments` specs for the `sitcoms3d-data` dataparser of `ns-train
# nerfacto-big` (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_big_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# `_arguments` specs for the `water-data` dataparser of `ns-train nerfacto-big`.
# Several descriptions embed literal newlines inside one quoted element — keep
# them intact. shtab-generated; regenerate from upstream help, do not hand-edit.
_shtab_tyro_ns_train_nerfacto_big_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# `_arguments` specs for the `blender-data` dataparser of `ns-train nerfacto`
# (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# `_arguments` specs for the `colmap` dataparser of `ns-train nerfacto`.
# Mirrors the water-data flag set above minus the semantics options; several
# descriptions embed literal newlines inside one quoted element. shtab-generated.
_shtab_tyro_ns_train_nerfacto_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# `_arguments` specs for the `dnerf-data` dataparser of `ns-train nerfacto`
# (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# `_arguments` specs for the `dycheck-data` dataparser of `ns-train nerfacto`
# (shtab-generated; regenerate rather than hand-edit).
_shtab_tyro_ns_train_nerfacto_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

_shtab_tyro_ns_train_nerfacto_huge_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: nerfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 100000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 16384)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.num-processes[Number of processes to use for train data loading. More than 1 doesn\'t result in that much better performance (default\: 1)]:pipeline.datamanager.num-processes:"
  "--pipeline.datamanager.queue-size[Size of shared data queue containing generated ray bundles and batches.
If queue_size \<\= 0, the queue size is infinite. (default\: 2)]:pipeline.datamanager.queue-size:"
  "--pipeline.datamanager.max-thread-workers[Maximum number of threads to use in thread pool executor. If None, use ThreadPool default. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 256)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 256)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 8192)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 21)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 512 512)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 64)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 5000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 32)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 0.01)]:pipeline.model.average-init-density:"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 512)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 7)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 2048)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 50000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# Option specs for `ns-train nerfacto-huge arkit-data` (ARKitScenes dataparser).
# Each entry is a zsh _arguments spec: "--flag[description]:message:completion-action".
# AUTO-GENERATED by shtab — edit the nerfstudio dataparser config, not this file.
_shtab_tyro_ns_train_nerfacto_huge_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Option specs for `ns-train nerfacto-huge blender-data` (synthetic Blender dataparser).
# Multi-line descriptions continue the quoted spec string across physical lines.
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for `ns-train nerfacto-huge colmap` (COLMAP reconstruction dataparser).
# Fixed-choice flags end in "(choice1 choice2 ...)"; path flags use the `_files` action.
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for `ns-train nerfacto-huge dnerf-data` (D-NeRF dynamic dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for `ns-train nerfacto-huge dycheck-data` (DyCheck iPhone dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for `ns-train nerfacto-huge instant-ngp-data` (Instant-NGP dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for `ns-train nerfacto-huge minimal-parser` (minimal test dataparser).
# NOTE: the default path below is a machine-specific absolute path baked in at
# generation time by shtab; regenerate rather than editing it here.
_shtab_tyro_ns_train_nerfacto_huge_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train nerfacto-huge nerfosr-data` (NeRF-OSR dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train nerfacto-huge nerfstudio-data` (native nerfstudio dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for `ns-train nerfacto-huge nuscenes-data` (NuScenes dataparser).
# Directory-valued flags use `_files -/` to complete directories only.
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for `ns-train nerfacto-huge phototourism-data` (Phototourism dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train nerfacto-huge scannet-data` (ScanNet dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for `ns-train nerfacto-huge scannetpp-data` (ScanNet++ dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for `ns-train nerfacto-huge sdfstudio-data` (SDFStudio dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train nerfacto-huge sitcoms3d-data` (Sitcoms3D dataparser).
# AUTO-GENERATED by shtab — do not hand-edit individual specs.
_shtab_tyro_ns_train_nerfacto_huge_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments specs for `ns-train nerfacto-huge water-data`:
# COLMAP-style dataparser options (scaling, orientation, train/eval split,
# image/mask/depth paths, 3D-point loading, semantics). Descriptions may span
# multiple physical lines inside one quoted spec; that is valid zsh.
_shtab_tyro_ns_train_nerfacto_huge_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments specs for `ns-train nerfacto instant-ngp-data`:
# Instant-NGP dataparser options.
# NOTE(review): the --train-split-fraction help says "Only used when eval_mode is
# train-split-fraction", but the eval-mode choices are (fraction filename interval
# all) — presumably it means "fraction"; the text mirrors the upstream docstring,
# so any fix belongs in the generator's source, not this generated file.
_shtab_tyro_ns_train_nerfacto_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments specs for `ns-train nerfacto minimal-parser`.
# NOTE(review): the default path below leaks a developer-specific home directory
# (\/home\/nikhil\/...) from the machine that ran the generator; harmless for
# completion, but regenerating on a clean environment would remove it.
_shtab_tyro_ns_train_nerfacto_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments specs for `ns-train nerfacto nerfosr-data`:
# NeRF-OSR dataparser options (scene selection, scaling, masks, pose handling).
_shtab_tyro_ns_train_nerfacto_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for `ns-train nerfacto nerfstudio-data`:
# standard nerfstudio-format dataparser options.
# NOTE(review): the --train-split-fraction help references eval_mode value
# "train-split-fraction", which is not among (fraction filename interval all) —
# presumably "fraction"; text mirrors the upstream docstring, fix there.
_shtab_tyro_ns_train_nerfacto_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments specs for `ns-train nerfacto nuscenes-data`:
# NuScenes dataparser options (`_files -/` restricts completion to directories).
_shtab_tyro_ns_train_nerfacto_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments specs for `ns-train nerfacto phototourism-data`:
# Phototourism dataparser options.
_shtab_tyro_ns_train_nerfacto_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for `ns-train nerfacto scannet-data`:
# ScanNet dataparser options, including .ply point-cloud loading.
_shtab_tyro_ns_train_nerfacto_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments specs for `ns-train nerfacto scannetpp-data`:
# ScanNet++ dataparser options (`_files -/` completes directories only).
_shtab_tyro_ns_train_nerfacto_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments specs for `ns-train nerfacto sdfstudio-data`:
# SDFStudio dataparser options (monocular priors, foreground masks).
_shtab_tyro_ns_train_nerfacto_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments specs for `ns-train nerfacto sitcoms3d-data`:
# Sitcoms3D dataparser options (same spec set as the nerfacto-huge variant).
_shtab_tyro_ns_train_nerfacto_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments specs for `ns-train nerfacto water-data`:
# COLMAP-style dataparser options (same spec set as the nerfacto-huge water
# variant above). Multi-line quoted specs are valid zsh.
_shtab_tyro_ns_train_nerfacto_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# `ns-train nerfgs` is an external method (see the command list in the header);
# until installed, only --help is advertised for completion.
_shtab_tyro_ns_train_nerfgs_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train nerfplayer-nerfacto` is an external method; only --help is
# advertised for completion until it is installed.
_shtab_tyro_ns_train_nerfplayer_nerfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train nerfplayer-ngp` is an external method; only --help is advertised
# for completion until it is installed.
_shtab_tyro_ns_train_nerfplayer_ngp_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# `ns-train nerfsh` exposes no options beyond --help in this generated spec.
_shtab_tyro_ns_train_nerfsh_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

_shtab_tyro_ns_train_neus_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: neus)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 20000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 5000)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 100000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 1024)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 1024)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 1024)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 4.0)]:pipeline.model.far-plane:"
  "--pipeline.model.far-plane-bg[How far along the ray to stop sampling of the background model. (default\: 1000.0)]:pipeline.model.far-plane-bg:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random last_sample white black)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: False)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.eikonal-loss-mult[Monocular normal consistency loss multiplier. (default\: 0.1)]:pipeline.model.eikonal-loss-mult:"
  "--pipeline.model.fg-mask-loss-mult[Foreground mask loss multiplier. (default\: 0.01)]:pipeline.model.fg-mask-loss-mult:"
  "--pipeline.model.mono-normal-loss-mult[Monocular normal consistency loss multiplier. (default\: 0.0)]:pipeline.model.mono-normal-loss-mult:"
  "--pipeline.model.mono-depth-loss-mult[Monocular depth consistency loss multiplier. (default\: 0.0)]:pipeline.model.mono-depth-loss-mult:"
  "--pipeline.model.background-model[background models (default\: mlp)]:pipeline.model.background-model:(grid mlp none)"
  "--pipeline.model.num-samples-outside[Number of samples outside the bounding sphere for background (default\: 32)]:pipeline.model.num-samples-outside:"
  "--pipeline.model.periodic-tvl-mult[Total variational loss multiplier (default\: 0.0)]:pipeline.model.periodic-tvl-mult:"
  "--pipeline.model.overwrite-near-far-plane[whether to use near and far collider from command line (default\: False)]:pipeline.model.overwrite-near-far-plane:(True False)"
  "--pipeline.model.num-samples[Number of uniform samples (default\: 64)]:pipeline.model.num-samples:"
  "--pipeline.model.num-samples-importance[Number of importance samples (default\: 64)]:pipeline.model.num-samples-importance:"
  "--pipeline.model.num-up-sample-steps[number of up sample step, 1 for simple coarse-to-fine sampling (default\: 4)]:pipeline.model.num-up-sample-steps:"
  "--pipeline.model.base-variance[fixed base variance in NeuS sampler, the inv_s will be base \* 2 \*\* iter during upsample (default\: 64)]:pipeline.model.base-variance:"
  "--pipeline.model.perturb[use to use perturb for the sampled points (default\: True)]:pipeline.model.perturb:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.sdf-field.num-layers[Number of layers for geometric network (default\: 8)]:pipeline.model.sdf-field.num-layers:"
  "--pipeline.model.sdf-field.hidden-dim[Number of hidden dimension of geometric network (default\: 256)]:pipeline.model.sdf-field.hidden-dim:"
  "--pipeline.model.sdf-field.geo-feat-dim[Dimension of geometric feature (default\: 256)]:pipeline.model.sdf-field.geo-feat-dim:"
  "--pipeline.model.sdf-field.num-layers-color[Number of layers for color network (default\: 4)]:pipeline.model.sdf-field.num-layers-color:"
  "--pipeline.model.sdf-field.hidden-dim-color[Number of hidden dimension of color network (default\: 256)]:pipeline.model.sdf-field.hidden-dim-color:"
  "--pipeline.model.sdf-field.appearance-embedding-dim[Dimension of appearance embedding (default\: 32)]:pipeline.model.sdf-field.appearance-embedding-dim:"
  "--pipeline.model.sdf-field.use-appearance-embedding[Whether to use appearance embedding (default\: False)]:pipeline.model.sdf-field.use-appearance-embedding:(True False)"
  "--pipeline.model.sdf-field.bias[Sphere size of geometric initialization (default\: 0.8)]:pipeline.model.sdf-field.bias:"
  "--pipeline.model.sdf-field.geometric-init[Whether to use geometric initialization (default\: True)]:pipeline.model.sdf-field.geometric-init:(True False)"
  "--pipeline.model.sdf-field.inside-outside[Whether to revert signed distance value, set to True for indoor scene (default\: True)]:pipeline.model.sdf-field.inside-outside:(True False)"
  "--pipeline.model.sdf-field.weight-norm[Whether to use weight norm for linear layer (default\: True)]:pipeline.model.sdf-field.weight-norm:(True False)"
  "--pipeline.model.sdf-field.use-grid-feature[Whether to use multi-resolution feature grids (default\: False)]:pipeline.model.sdf-field.use-grid-feature:(True False)"
  "--pipeline.model.sdf-field.divide-factor[Normalization factor for multi-resolution grids (default\: 2.0)]:pipeline.model.sdf-field.divide-factor:"
  "--pipeline.model.sdf-field.beta-init[Init learnable beta value for transformation of sdf to density (default\: 0.1)]:pipeline.model.sdf-field.beta-init:"
  "--pipeline.model.sdf-field.encoding-type[(default\: hash)]:pipeline.model.sdf-field.encoding-type:(hash periodic tensorf_vm)"
  "--pipeline.model.sdf-field.num-levels[Number of encoding levels (default\: 16)]:pipeline.model.sdf-field.num-levels:"
  "--pipeline.model.sdf-field.max-res[Maximum resolution of the encoding (default\: 2048)]:pipeline.model.sdf-field.max-res:"
  "--pipeline.model.sdf-field.base-res[Base resolution of the encoding (default\: 16)]:pipeline.model.sdf-field.base-res:"
  "--pipeline.model.sdf-field.log2-hashmap-size[Size of the hash map (default\: 19)]:pipeline.model.sdf-field.log2-hashmap-size:"
  "--pipeline.model.sdf-field.features-per-level[Number of features per encoding level (default\: 2)]:pipeline.model.sdf-field.features-per-level:"
  "--pipeline.model.sdf-field.use-hash[Whether to use hash encoding (default\: True)]:pipeline.model.sdf-field.use-hash:(True False)"
  "--pipeline.model.sdf-field.smoothstep[Whether to use the smoothstep function (default\: True)]:pipeline.model.sdf-field.smoothstep:(True False)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.warm-up-end[Iteration number where warmp ends (default\: 5000)]:optimizers.fields.scheduler.warm-up-end:"
  "--optimizers.fields.scheduler.learning-rate-alpha[Learning rate alpha value (default\: 0.05)]:optimizers.fields.scheduler.learning-rate-alpha:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 300000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.field-background.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.field-background.optimizer.lr:"
  "--optimizers.field-background.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.field-background.optimizer.eps:"
  "--optimizers.field-background.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.field-background.optimizer.max-norm:"
  "--optimizers.field-background.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.field-background.optimizer.weight-decay:"
  "--optimizers.field-background.scheduler.warm-up-end[Iteration number where warmp ends (default\: 5000)]:optimizers.field-background.scheduler.warm-up-end:"
  "--optimizers.field-background.scheduler.learning-rate-alpha[Learning rate alpha value (default\: 0.05)]:optimizers.field-background.scheduler.learning-rate-alpha:"
  "--optimizers.field-background.scheduler.max-steps[The maximum number of steps. (default\: 300000)]:optimizers.field-background.scheduler.max-steps:"
)

# zsh `_arguments` specs for the ARKitScenes dataparser options — presumably the
# `ns-train neus arkit-data` subcommand (auto-generated by shtab; see file header —
# do not hand-edit beyond regeneration). Each element has the shape
# "--flag[help text]:message:action": `_files` completes filesystem paths,
# "(True False)" offers a fixed choice list, and an empty trailing action
# accepts any word.
_shtab_tyro_ns_train_neus_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh `_arguments` specs for the Blender dataparser options — presumably the
# `ns-train neus blender-data` subcommand (auto-generated by shtab; see file
# header). NOTE(review): two of the specs below span multiple physical lines —
# the newline is inside the quoted help text and is intentional; keep the
# quoting intact if regenerating by hand.
_shtab_tyro_ns_train_neus_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh `_arguments` specs for the COLMAP dataparser options — presumably the
# `ns-train neus colmap` subcommand (auto-generated by shtab; see file header).
# Each element has the shape "--flag[help text]:message:action": `_files`
# completes paths, a "(a b c)" action offers a fixed choice list, and an empty
# trailing action accepts any word. Several help texts intentionally contain
# embedded newlines inside the quoted string.
# Fix vs. generated output: "Image width" -> "image width" in the
# --downscale-rounding-mode help text (stray capital in user-facing help).
_shtab_tyro_ns_train_neus_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh `_arguments` specs for the D-NeRF dataparser options — presumably the
# `ns-train neus dnerf-data` subcommand (auto-generated by shtab; see file
# header — regenerate rather than hand-edit).
_shtab_tyro_ns_train_neus_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh `_arguments` specs for the DyCheck (iPhone) dataparser options —
# presumably the `ns-train neus dycheck-data` subcommand (auto-generated by
# shtab; see file header). Note the dataparser-specific defaults here
# (scale-factor 5.0, scene-box-bound 1.5) differ from the other dataparsers.
_shtab_tyro_ns_train_neus_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

_shtab_tyro_ns_train_neus_facto_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: neus-facto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 5000)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 5000)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 20001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 2048)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 2048)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 2048)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 4.0)]:pipeline.model.far-plane:"
  "--pipeline.model.far-plane-bg[How far along the ray to stop sampling of the background model. (default\: 1000.0)]:pipeline.model.far-plane-bg:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random last_sample white black)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: False)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.eikonal-loss-mult[Monocular normal consistency loss multiplier. (default\: 0.1)]:pipeline.model.eikonal-loss-mult:"
  "--pipeline.model.fg-mask-loss-mult[Foreground mask loss multiplier. (default\: 0.01)]:pipeline.model.fg-mask-loss-mult:"
  "--pipeline.model.mono-normal-loss-mult[Monocular normal consistency loss multiplier. (default\: 0.0)]:pipeline.model.mono-normal-loss-mult:"
  "--pipeline.model.mono-depth-loss-mult[Monocular depth consistency loss multiplier. (default\: 0.0)]:pipeline.model.mono-depth-loss-mult:"
  "--pipeline.model.background-model[background models (default\: none)]:pipeline.model.background-model:(grid mlp none)"
  "--pipeline.model.num-samples-outside[Number of samples outside the bounding sphere for background (default\: 32)]:pipeline.model.num-samples-outside:"
  "--pipeline.model.periodic-tvl-mult[Total variational loss multiplier (default\: 0.0)]:pipeline.model.periodic-tvl-mult:"
  "--pipeline.model.overwrite-near-far-plane[whether to use near and far collider from command line (default\: False)]:pipeline.model.overwrite-near-far-plane:(True False)"
  "--pipeline.model.num-samples[Number of uniform samples (default\: 64)]:pipeline.model.num-samples:"
  "--pipeline.model.num-samples-importance[Number of importance samples (default\: 64)]:pipeline.model.num-samples-importance:"
  "--pipeline.model.num-up-sample-steps[number of up sample step, 1 for simple coarse-to-fine sampling (default\: 4)]:pipeline.model.num-up-sample-steps:"
  "--pipeline.model.base-variance[fixed base variance in NeuS sampler, the inv_s will be base \* 2 \*\* iter during upsample (default\: 64)]:pipeline.model.base-variance:"
  "--pipeline.model.perturb[use to use perturb for the sampled points (default\: True)]:pipeline.model.perturb:(True False)"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for the proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-neus-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-neus-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 1000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.sdf-field.num-layers[Number of layers for geometric network (default\: 2)]:pipeline.model.sdf-field.num-layers:"
  "--pipeline.model.sdf-field.hidden-dim[Number of hidden dimension of geometric network (default\: 256)]:pipeline.model.sdf-field.hidden-dim:"
  "--pipeline.model.sdf-field.geo-feat-dim[Dimension of geometric feature (default\: 256)]:pipeline.model.sdf-field.geo-feat-dim:"
  "--pipeline.model.sdf-field.num-layers-color[Number of layers for color network (default\: 2)]:pipeline.model.sdf-field.num-layers-color:"
  "--pipeline.model.sdf-field.hidden-dim-color[Number of hidden dimension of color network (default\: 256)]:pipeline.model.sdf-field.hidden-dim-color:"
  "--pipeline.model.sdf-field.appearance-embedding-dim[Dimension of appearance embedding (default\: 32)]:pipeline.model.sdf-field.appearance-embedding-dim:"
  "--pipeline.model.sdf-field.use-appearance-embedding[Whether to use appearance embedding (default\: False)]:pipeline.model.sdf-field.use-appearance-embedding:(True False)"
  "--pipeline.model.sdf-field.bias[Sphere size of geometric initialization (default\: 0.5)]:pipeline.model.sdf-field.bias:"
  "--pipeline.model.sdf-field.geometric-init[Whether to use geometric initialization (default\: True)]:pipeline.model.sdf-field.geometric-init:(True False)"
  "--pipeline.model.sdf-field.inside-outside[Whether to revert signed distance value, set to True for indoor scene (default\: True)]:pipeline.model.sdf-field.inside-outside:(True False)"
  "--pipeline.model.sdf-field.weight-norm[Whether to use weight norm for linear layer (default\: True)]:pipeline.model.sdf-field.weight-norm:(True False)"
  "--pipeline.model.sdf-field.use-grid-feature[Whether to use multi-resolution feature grids (default\: True)]:pipeline.model.sdf-field.use-grid-feature:(True False)"
  "--pipeline.model.sdf-field.divide-factor[Normalization factor for multi-resolution grids (default\: 2.0)]:pipeline.model.sdf-field.divide-factor:"
  "--pipeline.model.sdf-field.beta-init[Init learnable beta value for transformation of sdf to density (default\: 0.8)]:pipeline.model.sdf-field.beta-init:"
  "--pipeline.model.sdf-field.encoding-type[(default\: hash)]:pipeline.model.sdf-field.encoding-type:(hash periodic tensorf_vm)"
  "--pipeline.model.sdf-field.num-levels[Number of encoding levels (default\: 16)]:pipeline.model.sdf-field.num-levels:"
  "--pipeline.model.sdf-field.max-res[Maximum resolution of the encoding (default\: 2048)]:pipeline.model.sdf-field.max-res:"
  "--pipeline.model.sdf-field.base-res[Base resolution of the encoding (default\: 16)]:pipeline.model.sdf-field.base-res:"
  "--pipeline.model.sdf-field.log2-hashmap-size[Size of the hash map (default\: 19)]:pipeline.model.sdf-field.log2-hashmap-size:"
  "--pipeline.model.sdf-field.features-per-level[Number of features per encoding level (default\: 2)]:pipeline.model.sdf-field.features-per-level:"
  "--pipeline.model.sdf-field.use-hash[Whether to use hash encoding (default\: True)]:pipeline.model.sdf-field.use-hash:(True False)"
  "--pipeline.model.sdf-field.smoothstep[Whether to use the smoothstep function (default\: True)]:pipeline.model.sdf-field.smoothstep:(True False)"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 64)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.proposal-networks.scheduler.max-steps[The maximum number of steps. (default\: 20001)]:optimizers.proposal-networks.scheduler.max-steps:"
  "--optimizers.proposal-networks.scheduler.gamma[The learning rate decay factor. (default\: 0.33)]:optimizers.proposal-networks.scheduler.gamma:"
  "--optimizers.proposal-networks.scheduler.milestones[The milestone steps at which to decay the learning rate. (default\: 10000 1500 18000)]:optimizers.proposal-networks.scheduler.milestones:"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.warm-up-end[Iteration number where warmp ends (default\: 500)]:optimizers.fields.scheduler.warm-up-end:"
  "--optimizers.fields.scheduler.learning-rate-alpha[Learning rate alpha value (default\: 0.05)]:optimizers.fields.scheduler.learning-rate-alpha:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 20001)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.field-background.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.field-background.optimizer.lr:"
  "--optimizers.field-background.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.field-background.optimizer.eps:"
  "--optimizers.field-background.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.field-background.optimizer.max-norm:"
  "--optimizers.field-background.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.field-background.optimizer.weight-decay:"
  "--optimizers.field-background.scheduler.warm-up-end[Iteration number where warmp ends (default\: 500)]:optimizers.field-background.scheduler.warm-up-end:"
  "--optimizers.field-background.scheduler.learning-rate-alpha[Learning rate alpha value (default\: 0.05)]:optimizers.field-background.scheduler.learning-rate-alpha:"
  "--optimizers.field-background.scheduler.max-steps[The maximum number of steps. (default\: 20001)]:optimizers.field-background.scheduler.max-steps:"
)

# _arguments specs for `ns-train neus-facto arkit-data` (ARKitScenes dataparser flags).
# Each entry is "--flag[help text]:message:completion-action"; `\:` escapes literal colons.
# NOTE(review): generated by shtab (see file header) — regenerate instead of hand-editing.
_shtab_tyro_ns_train_neus_facto_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# _arguments specs for `ns-train neus-facto blender-data` (Blender dataparser flags).
# Some help texts span two physical lines inside one quoted word — the embedded
# newline is part of the spec string; do not rewrap. Generated by shtab.
_shtab_tyro_ns_train_neus_facto_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# _arguments specs for `ns-train neus-facto colmap` (COLMAP dataparser flags).
# Entries with a trailing parenthesized action, e.g. "(poses focus none)", offer a
# fixed choice list; "_files" completes paths. Generated by shtab — do not hand-edit.
_shtab_tyro_ns_train_neus_facto_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# _arguments specs for `ns-train neus-facto dnerf-data` (D-NeRF dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# _arguments specs for `ns-train neus-facto dycheck-data` (DyCheck iPhone dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# _arguments specs for `ns-train neus-facto instant-ngp-data` (Instant-NGP dataparser flags).
# The --eval-mode help spans five physical lines inside one quoted word. Generated by shtab.
_shtab_tyro_ns_train_neus_facto_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments specs for `ns-train neus-facto minimal-parser` (single --data flag).
# NOTE(review): the default path embeds a developer's home directory — an artifact of
# the machine this file was generated on; harmless for completion, regenerate to change.
_shtab_tyro_ns_train_neus_facto_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments specs for `ns-train neus-facto nerfosr-data` (NeRF-OSR dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for `ns-train neus-facto nerfstudio-data` (Nerfstudio dataparser flags).
# The --eval-mode help spans five physical lines inside one quoted word. Generated by shtab.
_shtab_tyro_ns_train_neus_facto_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments specs for `ns-train neus-facto nuscenes-data` (NuScenes dataparser flags).
# "_files -/" restricts completion to directories. Generated by shtab.
_shtab_tyro_ns_train_neus_facto_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments specs for `ns-train neus-facto phototourism-data` (Phototourism dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for `ns-train neus-facto scannet-data` (ScanNet dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments specs for `ns-train neus-facto scannetpp-data` (ScanNet++ dataparser flags).
# "_files -/" restricts completion to directories. Generated by shtab.
_shtab_tyro_ns_train_neus_facto_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments specs for `ns-train neus-facto sdfstudio-data` (SDFStudio dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments specs for `ns-train neus-facto sitcoms3d-data` (Sitcoms3D dataparser flags).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments specs for `ns-train neus-facto water-data` — superset of the colmap
# dataparser flags plus semantics options (--include-semantics, --label-path).
# Generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_neus_facto_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments specs for `ns-train neus instant-ngp-data` — identical content to the
# neus-facto variant above; shtab emits one array per (method, dataparser) pair.
_shtab_tyro_ns_train_neus_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments specs for `ns-train neus minimal-parser` (single --data flag).
# NOTE(review): default path embeds a developer's home directory — a generation-machine
# artifact; harmless for completion, regenerate to change.
_shtab_tyro_ns_train_neus_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments specs for `ns-train neus nerfosr-data` — identical content to the
# neus-facto variant above; shtab emits one array per (method, dataparser) pair.
_shtab_tyro_ns_train_neus_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for the `nerfstudio-data` dataparser options of the
# `neus` method. Descriptions may span several physical lines inside one
# quoted spec string (zsh allows embedded newlines in double quotes).
_shtab_tyro_ns_train_neus_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh _arguments specs for the `nuscenes-data` dataparser options of the
# `neus` method. Directory-valued flags use `_files -/` to restrict
# completion to directories.
_shtab_tyro_ns_train_neus_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh _arguments specs for the `phototourism-data` dataparser options of the
# `neus` method.
_shtab_tyro_ns_train_neus_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for the `scannet-data` dataparser options of the
# `neus` method.
_shtab_tyro_ns_train_neus_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh _arguments specs for the `scannetpp-data` dataparser options of the
# `neus` method.
_shtab_tyro_ns_train_neus_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh _arguments specs for the `sdfstudio-data` dataparser options of the
# `neus` method.
_shtab_tyro_ns_train_neus_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# zsh _arguments specs for the `sitcoms3d-data` dataparser options of the
# `neus` method.
_shtab_tyro_ns_train_neus_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# zsh _arguments specs for the `water-data` dataparser options of the `neus`
# method. This is the largest dataparser spec in this group; it mirrors the
# colmap-style options (images/masks/depths paths, 3D point loading, eval
# split modes). Multi-line descriptions are embedded newlines inside one
# quoted spec string.
_shtab_tyro_ns_train_neus_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

_shtab_tyro_ns_train_phototourism_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: phototourism)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: 40)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: 100)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: 40)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: 100)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 64)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 64)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 1000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 32)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 1.0)]:pipeline.model.average-init-density:"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# Completion specs for `ns-train phototourism arkit-data` (ARKitScenes dataparser).
# Each entry follows zsh _arguments spec syntax: "--flag[description]:message:action"
# (generated by shtab; ':' and other magic characters in descriptions are escaped).
_shtab_tyro_ns_train_phototourism_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Completion specs for `ns-train phototourism blender-data` (Blender synthetic dataparser).
# NOTE: some specs span multiple physical lines because the help text contains
# embedded newlines; the whole quoted string is still a single array element.
_shtab_tyro_ns_train_phototourism_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Completion specs for `ns-train phototourism colmap` (COLMAP reconstruction dataparser).
# Path-valued flags use the `_files` action (optionally with `-/` for directories);
# enum-valued flags list their choices in parentheses at the end of the spec.
_shtab_tyro_ns_train_phototourism_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Completion specs for `ns-train phototourism dnerf-data` (D-NeRF dataparser).
_shtab_tyro_ns_train_phototourism_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Completion specs for `ns-train phototourism dycheck-data` (DyCheck iPhone dataparser).
_shtab_tyro_ns_train_phototourism_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Completion specs for `ns-train phototourism instant-ngp-data` (Instant-NGP dataparser).
_shtab_tyro_ns_train_phototourism_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Completion specs for `ns-train phototourism minimal-parser`.
# NOTE(review): the default --data path below is a machine-specific absolute path
# baked in at generation time; regenerate the script to update it.
_shtab_tyro_ns_train_phototourism_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Completion specs for `ns-train phototourism nerfosr-data` (NeRF-OSR dataparser).
_shtab_tyro_ns_train_phototourism_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Completion specs for `ns-train phototourism nerfstudio-data` (default nerfstudio dataparser).
_shtab_tyro_ns_train_phototourism_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Completion specs for `ns-train phototourism nuscenes-data` (NuScenes dataparser).
# Directory-valued flags use `_files -/` to complete directories only.
_shtab_tyro_ns_train_phototourism_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Completion specs for `ns-train phototourism phototourism-data` (Phototourism dataparser).
_shtab_tyro_ns_train_phototourism_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Completion specs for `ns-train phototourism scannet-data` (ScanNet dataparser).
_shtab_tyro_ns_train_phototourism_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Completion specs for `ns-train phototourism scannetpp-data` (ScanNet++ dataparser).
_shtab_tyro_ns_train_phototourism_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Completion specs for `ns-train phototourism sdfstudio-data` (SDFStudio dataparser).
_shtab_tyro_ns_train_phototourism_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Completion specs for `ns-train phototourism sitcoms3d-data` (Sitcoms3D dataparser).
_shtab_tyro_ns_train_phototourism_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Completion specs for `ns-train phototourism water-data` (underwater dataparser).
# Superset of the colmap dataparser options plus semantics flags
# (--include-semantics, --label-path).
_shtab_tyro_ns_train_phototourism_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Completion specs for `ns-train pynerf` (external method stub: only -h/--help
# is known until the package is installed and the script regenerated).
_shtab_tyro_ns_train_pynerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# Completion specs for `ns-train pynerf-occupancy-grid` (external method stub:
# only -h/--help is known until the package is installed and the script regenerated).
_shtab_tyro_ns_train_pynerf_occupancy_grid_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# Completion specs for `ns-train pynerf-synthetic` (external method stub:
# only -h/--help is known until the package is installed and the script regenerated).
_shtab_tyro_ns_train_pynerf_synthetic_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

_shtab_tyro_ns_train_seathru_nerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: seathru-nerf)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 100000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 16384)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[Near plane of rays. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[Far plane of rays. (default\: 10.0)]:pipeline.model.far-plane:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the object base MLP. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.min-res[Minimum resolution of the hashmap for the object base MLP. (default\: 16)]:pipeline.model.min-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the object base MLP. (default\: 8192)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the object base MLP. (default\: 21)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[Number of features per level of the hashmap for the object base MLP. (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-layers[Number of hidden layers for the object base MLP. (default\: 2)]:pipeline.model.num-layers:"
  "--pipeline.model.hidden-dim[Dimension of hidden layers for the object base MLP. (default\: 256)]:pipeline.model.hidden-dim:"
  "--pipeline.model.bottleneck-dim[Bottleneck dimension between object base MLP and object head MLP. (default\: 63)]:pipeline.model.bottleneck-dim:"
  "--pipeline.model.num-layers-colour[Number of hidden layers for colour MLP. (default\: 3)]:pipeline.model.num-layers-colour:"
  "--pipeline.model.hidden-dim-colour[Dimension of hidden layers for colour MLP. (default\: 256)]:pipeline.model.hidden-dim-colour:"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 128)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.implementation[Implementation of the MLPs (tcnn or torch). (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.use-viewing-dir-obj-rgb[Whether to use viewing direction in object rgb MLP. (default\: False)]:pipeline.model.use-viewing-dir-obj-rgb:(True False)"
  "--pipeline.model.object-density-bias[Bias for object density. (default\: 0.0)]:pipeline.model.object-density-bias:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 128)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 64)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup. (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps. (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Whether to use the same proposal network. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing (this gives an exploration at the         beginning of training). (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 15000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to         the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.initial-acc-loss-mult[Initial accuracy loss multiplier. (default\: 0.0001)]:pipeline.model.initial-acc-loss-mult:"
  "--pipeline.model.final-acc-loss-mult[Final accuracy loss multiplier. (default\: 0.0001)]:pipeline.model.final-acc-loss-mult:"
  "--pipeline.model.acc-decay[Decay of the accuracy loss multiplier. (After this many steps, acc_loss_mult \=         final_acc_loss_mult.) (default\: 10000)]:pipeline.model.acc-decay:"
  "--pipeline.model.rgb-loss-use-bayer-mask[Whether to use a Bayer mask for the RGB loss. (default\: False)]:pipeline.model.rgb-loss-use-bayer-mask:(True False)"
  "--pipeline.model.prior-on[Prior on the proposal weights or transmittance. (default\: transmittance)]:pipeline.model.prior-on:(weights transmittance)"
  "--pipeline.model.debug[Whether to save debug information. (default\: False)]:pipeline.model.debug:(True False)"
  "--pipeline.model.beta-prior[Beta hyperparameter for the prior used in the acc_loss. (default\: 100.0)]:pipeline.model.beta-prior:"
  "--pipeline.model.use-new-rendering-eqs[Whether to use the new rendering equations. (default\: True)]:pipeline.model.use-new-rendering-eqs:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 512)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 7)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 2048)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.002)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.proposal-networks.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.proposal-networks.scheduler.lr-pre-warmup:"
  "--optimizers.proposal-networks.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1e-05)]:optimizers.proposal-networks.scheduler.lr-final:"
  "--optimizers.proposal-networks.scheduler.warmup-steps[Number of warmup steps. (default\: 1024)]:optimizers.proposal-networks.scheduler.warmup-steps:"
  "--optimizers.proposal-networks.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.proposal-networks.scheduler.max-steps:"
  "--optimizers.proposal-networks.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.proposal-networks.scheduler.ramp:(linear cosine)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.002)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1e-05)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 1024)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.mode[(default\: off)]:optimizers.camera-opt.mode:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.0006)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0.01)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 6e-06)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# Option specs for `ns-train seathru-nerf <arkit-data>` (ARKitScenes dataparser).
# Each entry uses the zsh `_arguments` spec format:
#   "--flag[description]:message:action"
# where the action `_files` completes file paths, `_files -/` completes
# directories only, `(a b c)` offers a fixed word list, and an empty action
# means free-form input. Backslashes escape characters that are special to
# `_arguments` (e.g. `\:`, `\/`, `\'`) inside the bracketed description.
# NOTE(review): this file is AUTOMATICALLY GENERATED by `shtab` (see header);
# regenerate it from the CLI definitions rather than hand-editing the strings.
_shtab_tyro_ns_train_seathru_nerf_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Option specs for `ns-train seathru-nerf <blender-data>` (Blender dataparser).
# zsh `_arguments` format: "--flag[description]:message:action"; `_files`
# completes paths, an empty action accepts any value. Descriptions may span
# multiple physical lines — zsh allows literal newlines inside double quotes.
# NOTE(review): AUTOMATICALLY GENERATED by `shtab`; regenerate instead of
# hand-editing the option strings.
_shtab_tyro_ns_train_seathru_nerf_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for `ns-train seathru-nerf <colmap>` (COLMAP dataparser).
# zsh `_arguments` format: "--flag[description]:message:action". Actions:
# `_files` completes paths, `(a b c)` restricts completion to the listed
# words, and an empty action means free-form input. Multi-line descriptions
# are literal newlines inside the double-quoted spec, which zsh permits.
# Description text (including phrases such as "eval_mode is eval-interval")
# is copied verbatim from the upstream Python docstrings by the generator.
# NOTE(review): AUTOMATICALLY GENERATED by `shtab`; regenerate rather than
# hand-editing these strings.
_shtab_tyro_ns_train_seathru_nerf_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for `ns-train seathru-nerf <dnerf-data>` (D-NeRF dataparser).
# zsh `_arguments` format: "--flag[description]:message:action".
# NOTE(review): AUTOMATICALLY GENERATED by `shtab`; regenerate instead of
# hand-editing the option strings.
_shtab_tyro_ns_train_seathru_nerf_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for `ns-train seathru-nerf <dycheck-data>` (DyCheck/iPhone
# dataparser). zsh `_arguments` format: "--flag[description]:message:action".
# NOTE(review): AUTOMATICALLY GENERATED by `shtab`; regenerate instead of
# hand-editing the option strings.
_shtab_tyro_ns_train_seathru_nerf_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for `ns-train seathru-nerf <instant-ngp-data>` (Instant-NGP
# dataparser). zsh `_arguments` format: "--flag[description]:message:action";
# multi-line descriptions are literal newlines inside the double-quoted spec.
# Description text is copied verbatim from the upstream Python docstrings.
# NOTE(review): AUTOMATICALLY GENERATED by `shtab`; regenerate instead of
# hand-editing the option strings.
_shtab_tyro_ns_train_seathru_nerf_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

_shtab_tyro_ns_train_seathru_nerf_lite_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: seathru-nerf-lite)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 50000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 8192)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 32768)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[Near plane of rays. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[Far plane of rays. (default\: 10.0)]:pipeline.model.far-plane:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the object base MLP. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.min-res[Minimum resolution of the hashmap for the object base MLP. (default\: 16)]:pipeline.model.min-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the object base MLP. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the object base MLP. (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[Number of features per level of the hashmap for the object base MLP. (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-layers[Number of hidden layers for the object base MLP. (default\: 2)]:pipeline.model.num-layers:"
  "--pipeline.model.hidden-dim[Dimension of hidden layers for the object base MLP. (default\: 64)]:pipeline.model.hidden-dim:"
  "--pipeline.model.bottleneck-dim[Bottleneck dimension between object base MLP and object head MLP. (default\: 31)]:pipeline.model.bottleneck-dim:"
  "--pipeline.model.num-layers-colour[Number of hidden layers for colour MLP. (default\: 3)]:pipeline.model.num-layers-colour:"
  "--pipeline.model.hidden-dim-colour[Dimension of hidden layers for colour MLP. (default\: 64)]:pipeline.model.hidden-dim-colour:"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 64)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.implementation[Implementation of the MLPs (tcnn or torch). (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.use-viewing-dir-obj-rgb[Whether to use viewing direction in object rgb MLP. (default\: False)]:pipeline.model.use-viewing-dir-obj-rgb:(True False)"
  "--pipeline.model.object-density-bias[Bias for object density. (default\: 0.0)]:pipeline.model.object-density-bias:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 128)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 64)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup. (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps. (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Whether to use the same proposal network. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing (this gives an exploration at the         beginning of training). (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 15000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to         the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.initial-acc-loss-mult[Initial accuracy loss multiplier. (default\: 0.0001)]:pipeline.model.initial-acc-loss-mult:"
  "--pipeline.model.final-acc-loss-mult[Final accuracy loss multiplier. (default\: 0.0001)]:pipeline.model.final-acc-loss-mult:"
  "--pipeline.model.acc-decay[Decay of the accuracy loss multiplier. (After this many steps, acc_loss_mult \=         final_acc_loss_mult.) (default\: 10000)]:pipeline.model.acc-decay:"
  "--pipeline.model.rgb-loss-use-bayer-mask[Whether to use a Bayer mask for the RGB loss. (default\: False)]:pipeline.model.rgb-loss-use-bayer-mask:(True False)"
  "--pipeline.model.prior-on[Prior on the proposal weights or transmittance. (default\: transmittance)]:pipeline.model.prior-on:(weights transmittance)"
  "--pipeline.model.debug[Whether to save debug information. (default\: False)]:pipeline.model.debug:(True False)"
  "--pipeline.model.beta-prior[Beta hyperparameter for the prior used in the acc_loss. (default\: 100.0)]:pipeline.model.beta-prior:"
  "--pipeline.model.use-new-rendering-eqs[Whether to use the new rendering equations. (default\: True)]:pipeline.model.use-new-rendering-eqs:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.002)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.proposal-networks.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.proposal-networks.scheduler.lr-pre-warmup:"
  "--optimizers.proposal-networks.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1e-05)]:optimizers.proposal-networks.scheduler.lr-final:"
  "--optimizers.proposal-networks.scheduler.warmup-steps[Number of warmup steps. (default\: 1024)]:optimizers.proposal-networks.scheduler.warmup-steps:"
  "--optimizers.proposal-networks.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.proposal-networks.scheduler.max-steps:"
  "--optimizers.proposal-networks.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.proposal-networks.scheduler.ramp:(linear cosine)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.002)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1e-05)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 1024)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.mode[(default\: off)]:optimizers.camera-opt.mode:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.0006)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0.01)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 6e-06)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 500000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# Option specs for `ns-train seathru-nerf-lite arkit-data`, in zsh
# `_arguments` spec syntax: "--flag[help text]:message:completion-action".
# An empty action means free-form input; `_files` completes paths;
# a parenthesized list restricts completion to those literal values.
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Option specs for `ns-train seathru-nerf-lite blender-data` (zsh
# `_arguments` format). Multi-line help strings are legal here: the
# embedded newline stays inside the quoted spec word.
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for `ns-train seathru-nerf-lite colmap` (zsh `_arguments`
# format). Enumerated actions like `(floor round ceil)` restrict tab
# completion to the valid choices; `_files` completes filesystem paths.
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for `ns-train seathru-nerf-lite dnerf-data` (zsh
# `_arguments` format: "--flag[help]:message:action").
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for `ns-train seathru-nerf-lite dycheck-data` (zsh
# `_arguments` format). Note this parser's scale-factor default (5.0)
# differs from most other dataparsers (1.0), per the generated help text.
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for `ns-train seathru-nerf-lite instant-ngp-data` (zsh
# `_arguments` format: "--flag[help]:message:action").
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for `ns-train seathru-nerf-lite minimal-parser` (zsh
# `_arguments` format).
# NOTE(review): the default path below is a machine-specific absolute path
# baked in at generation time; it mirrors the generating environment's
# Python default rather than anything portable -- regenerate to update.
_shtab_tyro_ns_train_seathru_nerf_lite_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train seathru-nerf-lite nerfosr-data` (zsh
# `_arguments` format: "--flag[help]:message:action").
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train seathru-nerf-lite nerfstudio-data` (zsh
# `_arguments` format: "--flag[help]:message:action").
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for `ns-train seathru-nerf-lite nuscenes-data` (zsh
# `_arguments` format). `_files -/` restricts completion to directories.
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for `ns-train seathru-nerf-lite phototourism-data` (zsh
# `_arguments` format: "--flag[help]:message:action").
# Generated by shtab -- regenerate from the Python CLI instead of editing.
_shtab_tyro_ns_train_seathru_nerf_lite_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train seathru-nerf-lite scannet-data`
# (ScanNet dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_lite_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for `ns-train seathru-nerf-lite scannetpp-data`
# (ScanNet++ dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_lite_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for `ns-train seathru-nerf-lite sdfstudio-data`
# (SDFStudio dataparser arguments, shtab-generated). Note: descriptions may
# span multiple physical lines inside one quoted array element.
_shtab_tyro_ns_train_seathru_nerf_lite_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train seathru-nerf-lite sitcoms3d-data`
# (Sitcoms3D dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_lite_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for `ns-train seathru-nerf-lite water-data`
# (colmap-style underwater dataparser arguments, shtab-generated).
# Multi-line quoted elements below are single array entries.
_shtab_tyro_ns_train_seathru_nerf_lite_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for `ns-train seathru-nerf minimal-parser`
# (minimal dataparser: only a data path, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train seathru-nerf nerfosr-data`
# (NeRF-OSR dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train seathru-nerf nerfstudio-data`
# (default Nerfstudio dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for `ns-train seathru-nerf nuscenes-data`
# (NuScenes dataparser arguments; mirrors the seathru-nerf-lite variant above).
_shtab_tyro_ns_train_seathru_nerf_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for `ns-train seathru-nerf phototourism-data`
# (Phototourism dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train seathru-nerf scannet-data`
# (ScanNet dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for `ns-train seathru-nerf scannetpp-data`
# (ScanNet++ dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for `ns-train seathru-nerf sdfstudio-data`
# (SDFStudio dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train seathru-nerf sitcoms3d-data`
# (Sitcoms3D dataparser arguments, shtab-generated).
_shtab_tyro_ns_train_seathru_nerf_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for `ns-train seathru-nerf water-data`
# (colmap-style underwater dataparser arguments; mirrors the
# seathru-nerf-lite water-data variant). Multi-line quoted elements
# below are single array entries.
_shtab_tyro_ns_train_seathru_nerf_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

_shtab_tyro_ns_train_semantic_nerfw_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: semantic-nerfw)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: True)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 65536)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 8192)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 65536)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.near-plane[How far along the ray to start sampling. (default\: 0.05)]:pipeline.model.near-plane:"
  "--pipeline.model.far-plane[How far along the ray to stop sampling. (default\: 1000.0)]:pipeline.model.far-plane:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: last_sample)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.hidden-dim[Dimension of hidden layers (default\: 64)]:pipeline.model.hidden-dim:"
  "--pipeline.model.hidden-dim-color[Dimension of hidden layers for color network (default\: 64)]:pipeline.model.hidden-dim-color:"
  "--pipeline.model.hidden-dim-transient[Dimension of hidden layers for transient network (default\: 64)]:pipeline.model.hidden-dim-transient:"
  "--pipeline.model.num-levels[Number of levels of the hashmap for the base mlp. (default\: 16)]:pipeline.model.num-levels:"
  "--pipeline.model.base-res[Resolution of the base grid for the hashgrid. (default\: 16)]:pipeline.model.base-res:"
  "--pipeline.model.max-res[Maximum resolution of the hashmap for the base mlp. (default\: 2048)]:pipeline.model.max-res:"
  "--pipeline.model.log2-hashmap-size[Size of the hashmap for the base mlp (default\: 19)]:pipeline.model.log2-hashmap-size:"
  "--pipeline.model.features-per-level[How many hashgrid features per level (default\: 2)]:pipeline.model.features-per-level:"
  "--pipeline.model.num-proposal-samples-per-ray[Number of samples per ray for each proposal network. (default\: 256 96)]:pipeline.model.num-proposal-samples-per-ray:"
  "--pipeline.model.num-nerf-samples-per-ray[Number of samples per ray for the nerf network. (default\: 48)]:pipeline.model.num-nerf-samples-per-ray:"
  "--pipeline.model.proposal-update-every[Sample every n steps after the warmup (default\: 5)]:pipeline.model.proposal-update-every:"
  "--pipeline.model.proposal-warmup[Scales n from 1 to proposal_update_every over this many steps (default\: 5000)]:pipeline.model.proposal-warmup:"
  "--pipeline.model.num-proposal-iterations[Number of proposal network iterations. (default\: 2)]:pipeline.model.num-proposal-iterations:"
  "--pipeline.model.use-same-proposal-network[Use the same proposal network. Otherwise use different ones. (default\: False)]:pipeline.model.use-same-proposal-network:(True False)"
  "--pipeline.model.proposal-initial-sampler[Initial sampler for the proposal network. Piecewise is preferred for unbounded scenes. (default\: piecewise)]:pipeline.model.proposal-initial-sampler:(piecewise uniform)"
  "--pipeline.model.interlevel-loss-mult[Proposal loss multiplier. (default\: 1.0)]:pipeline.model.interlevel-loss-mult:"
  "--pipeline.model.distortion-loss-mult[Distortion loss multiplier. (default\: 0.002)]:pipeline.model.distortion-loss-mult:"
  "--pipeline.model.orientation-loss-mult[Orientation loss multiplier on computed normals. (default\: 0.0001)]:pipeline.model.orientation-loss-mult:"
  "--pipeline.model.pred-normal-loss-mult[Predicted normal loss multiplier. (default\: 0.001)]:pipeline.model.pred-normal-loss-mult:"
  "--pipeline.model.use-proposal-weight-anneal[Whether to use proposal weight annealing. (default\: True)]:pipeline.model.use-proposal-weight-anneal:(True False)"
  "--pipeline.model.use-appearance-embedding[Whether to use an appearance embedding. (default\: True)]:pipeline.model.use-appearance-embedding:(True False)"
  "--pipeline.model.use-average-appearance-embedding[Whether to use average appearance embedding or zeros for inference. (default\: True)]:pipeline.model.use-average-appearance-embedding:(True False)"
  "--pipeline.model.proposal-weights-anneal-slope[Slope of the annealing function for the proposal weights. (default\: 10.0)]:pipeline.model.proposal-weights-anneal-slope:"
  "--pipeline.model.proposal-weights-anneal-max-num-iters[Max num iterations for the annealing function. (default\: 1000)]:pipeline.model.proposal-weights-anneal-max-num-iters:"
  "--pipeline.model.use-single-jitter[Whether use single jitter or not for the proposal networks. (default\: True)]:pipeline.model.use-single-jitter:(True False)"
  "--pipeline.model.predict-normals[Whether to predict normals or not. (default\: False)]:pipeline.model.predict-normals:(True False)"
  "--pipeline.model.disable-scene-contraction[Whether to disable scene contraction or not. (default\: False)]:pipeline.model.disable-scene-contraction:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.implementation[Which implementation to use for the model. (default\: tcnn)]:pipeline.model.implementation:(tcnn torch)"
  "--pipeline.model.appearance-embed-dim[Dimension of the appearance embedding. (default\: 32)]:pipeline.model.appearance-embed-dim:"
  "--pipeline.model.average-init-density[Average initial density output from MLP. (default\: 1.0)]:pipeline.model.average-init-density:"
  "--pipeline.model.use-transient-embedding[Whether to use transient embedding. (default\: False)]:pipeline.model.use-transient-embedding:(True False)"
  "--pipeline.model.semantic-loss-weight[(default\: 1.0)]:pipeline.model.semantic-loss-weight:"
  "--pipeline.model.pass-semantic-gradients[(default\: False)]:pipeline.model.pass-semantic-gradients:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.proposal-net-args-list.0.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.0.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.0.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.0.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.0.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.0.num-levels:"
  "--pipeline.model.proposal-net-args-list.0.max-res[(default\: 128)]:pipeline.model.proposal-net-args-list.0.max-res:"
  "--pipeline.model.proposal-net-args-list.0.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.0.use-linear:(True False)"
  "--pipeline.model.proposal-net-args-list.1.hidden-dim[(default\: 16)]:pipeline.model.proposal-net-args-list.1.hidden-dim:"
  "--pipeline.model.proposal-net-args-list.1.log2-hashmap-size[(default\: 17)]:pipeline.model.proposal-net-args-list.1.log2-hashmap-size:"
  "--pipeline.model.proposal-net-args-list.1.num-levels[(default\: 5)]:pipeline.model.proposal-net-args-list.1.num-levels:"
  "--pipeline.model.proposal-net-args-list.1.max-res[(default\: 256)]:pipeline.model.proposal-net-args-list.1.max-res:"
  "--pipeline.model.proposal-net-args-list.1.use-linear[(default\: False)]:pipeline.model.proposal-net-args-list.1.use-linear:(True False)"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: SO3xR3)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.proposal-networks.scheduler[(default\: None)]:optimizers.proposal-networks.scheduler:(None)"
  "--optimizers.proposal-networks.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.proposal-networks.optimizer.lr:"
  "--optimizers.proposal-networks.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.proposal-networks.optimizer.eps:"
  "--optimizers.proposal-networks.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.proposal-networks.optimizer.max-norm:"
  "--optimizers.proposal-networks.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.proposal-networks.optimizer.weight-decay:"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.01)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw arkit-data`
# (ARKitScenes dataparser). Each entry is "--flag[help]:tag:action"; an
# action of "(a b)" offers literal choices, "_files" completes paths, and an
# empty action takes free-form input. Auto-generated by shtab — do not edit
# individual entries by hand; regenerate instead.
_shtab_tyro_ns_train_semantic_nerfw_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw blender-data`
# (synthetic Blender dataparser). Format: "--flag[help]:tag:action"; "_files"
# completes filesystem paths. Auto-generated by shtab — regenerate rather than
# hand-editing. Note: help text may legally span multiple physical lines
# inside one quoted entry.
_shtab_tyro_ns_train_semantic_nerfw_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw colmap`
# (COLMAP-reconstruction dataparser). Format: "--flag[help]:tag:action";
# "(a b c)" actions enumerate the literal allowed values, "_files" completes
# paths, empty action means free-form value. Auto-generated by shtab — do not
# edit entries by hand; regenerate instead.
_shtab_tyro_ns_train_semantic_nerfw_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw dnerf-data`
# (D-NeRF dataparser). Format: "--flag[help]:tag:action". Auto-generated by
# shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw dycheck-data`
# (DyCheck iPhone dataparser). Format: "--flag[help]:tag:action".
# Auto-generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw instant-ngp-data`
# (Instant-NGP dataparser). Format: "--flag[help]:tag:action"; "(a b)"
# actions enumerate allowed values. Auto-generated by shtab — regenerate
# rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw minimal-parser`.
# Auto-generated by shtab. NOTE(review): the default path below embeds a
# developer's home directory from the generating machine — harmless for
# completion, but confirm it is intended before regenerating for release.
_shtab_tyro_ns_train_semantic_nerfw_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw nerfosr-data`
# (NeRF-OSR dataparser). Format: "--flag[help]:tag:action"; "(a b)" actions
# enumerate allowed values. Auto-generated by shtab — regenerate rather than
# hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw nerfstudio-data`
# (the default nerfstudio dataparser). Format: "--flag[help]:tag:action";
# "(a b)" actions enumerate allowed values, "_files" completes paths.
# Auto-generated by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw nuscenes-data`
# (NuScenes dataparser). Format: "--flag[help]:tag:action"; "_files -/"
# restricts completion to directories. Auto-generated by shtab — regenerate
# rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw
# phototourism-data` (Phototourism dataparser). Format:
# "--flag[help]:tag:action". Auto-generated by shtab — regenerate rather than
# hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw scannet-data`
# (ScanNet dataparser). Format: "--flag[help]:tag:action". Auto-generated by
# shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw scannetpp-data`
# (ScanNet++ dataparser). Format: "--flag[help]:tag:action"; "_files -/"
# restricts completion to directories. Auto-generated by shtab — regenerate
# rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh `_arguments` option specs for `ns-train semantic-nerfw sdfstudio-data`
# (SDFStudio dataparser). Format: "--flag[help]:tag:action". Auto-generated
# by shtab — regenerate rather than hand-editing.
_shtab_tyro_ns_train_semantic_nerfw_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train semantic-nerfw sitcoms3d-data`.
# zsh _arguments-style specs: "--flag[description]:message:action".
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_semantic_nerfw_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for `ns-train semantic-nerfw water-data`.
# zsh _arguments-style specs: "--flag[description]:message:action"; several
# descriptions intentionally span multiple lines inside a single quoted string.
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_semantic_nerfw_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for `ns-train signerf` (external method stub: only -h/--help,
# presumably because the method is not installed at generation time — verify
# against shtab output if this looks wrong).
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_signerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# Option specs for `ns-train signerf-nerfacto` (external method stub: only
# -h/--help, presumably because the method is not installed at generation time).
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_signerf_nerfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# Option specs for `ns-train splatfacto` (Gaussian splatting method).
# Each element is a zsh _arguments-style spec: "--flag[description]:message:action",
# with action `_files` / `_files -/` (file/dir completion), a `(word list)` of
# choices, or empty (free-form value). Long descriptions span multiple lines
# inside a single quoted element; blank-looking lines below are part of those
# strings, not separators. Flag groups mirror the tyro config tree:
# top-level trainer flags, machine.*, logging.*, viewer.*, pipeline.datamanager.*,
# pipeline.model.*, and optimizers.<param-group>.{optimizer,scheduler}.*.
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_splatfacto_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: splatfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 100)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: random)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.1)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 0.5)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 30)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.use-absgrad[Whether to use absgrad to densify gaussians, if False, will use grad rather than absgrad (default\: True)]:pipeline.model.use-absgrad:(True False)"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.01)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 4000)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 15000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.use-scale-regularization[If enabled, a scale regularization introduced in PhysGauss (https\:\/\/xpandora.github.io\/PhysGaussian\/) is used for reducing huge spikey gaussians. (default\: False)]:pipeline.model.use-scale-regularization:(True False)"
  "--pipeline.model.max-gauss-ratio[threshold of ratio of gaussian max to min scale before applying regularization
loss from the PhysGaussian paper (default\: 10.0)]:pipeline.model.max-gauss-ratio:"
  "--pipeline.model.output-depth-during-training[If True, output depth during training. Otherwise, only output depth during evaluation. (default\: False)]:pipeline.model.output-depth-during-training:(True False)"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.use-bilateral-grid[If True, use bilateral grid to handle the ISP changes in the image space. This technique was introduced in the paper \'Bilateral Guided Radiance Field Processing\' (https\:\/\/bilarfpro.github.io\/). (default\: False)]:pipeline.model.use-bilateral-grid:(True False)"
  "--pipeline.model.grid-shape[Shape of the bilateral grid (X, Y, W) (default\: 16 16 8)]:pipeline.model.grid-shape:"
  "--pipeline.model.color-corrected-metrics[If True, apply color correction to the rendered images before computing the metrics. (default\: False)]:pipeline.model.color-corrected-metrics:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: off)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1.6e-06)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.scheduler[(default\: None)]:optimizers.features-dc.scheduler:(None)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler[(default\: None)]:optimizers.features-rest.scheduler:(None)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler[(default\: None)]:optimizers.opacities.scheduler:(None)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.scales.scheduler[(default\: None)]:optimizers.scales.scheduler:(None)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.quats.scheduler[(default\: None)]:optimizers.quats.scheduler:(None)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.0001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 0)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-07)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 1000)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.bilateral-grid.optimizer.lr[The learning rate to use. (default\: 0.002)]:optimizers.bilateral-grid.optimizer.lr:"
  "--optimizers.bilateral-grid.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.bilateral-grid.optimizer.eps:"
  "--optimizers.bilateral-grid.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.bilateral-grid.optimizer.max-norm:"
  "--optimizers.bilateral-grid.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.bilateral-grid.optimizer.weight-decay:"
  "--optimizers.bilateral-grid.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 0)]:optimizers.bilateral-grid.scheduler.lr-pre-warmup:"
  "--optimizers.bilateral-grid.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.bilateral-grid.scheduler.lr-final:"
  "--optimizers.bilateral-grid.scheduler.warmup-steps[Number of warmup steps. (default\: 1000)]:optimizers.bilateral-grid.scheduler.warmup-steps:"
  "--optimizers.bilateral-grid.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.bilateral-grid.scheduler.max-steps:"
  "--optimizers.bilateral-grid.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.bilateral-grid.scheduler.ramp:(linear cosine)"
)

# Option specs for `ns-train splatfacto arkit-data` (ARKitScenes dataparser).
# zsh _arguments-style specs: "--flag[description]:message:action".
# NOTE: auto-generated by shtab (see file header) — regenerate rather than hand-edit.
_shtab_tyro_ns_train_splatfacto_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Completion specs for `ns-train splatfacto-big` (top-level trainer options).
# Each element uses the zsh `_arguments` spec syntax:
#   "--flag[description]:message:action"
# where the action is e.g. `_files` (path completion), `(a b c)` (fixed
# choices), or empty (free-form value). Generated by shtab — do not expect
# hand edits to survive regeneration.
_shtab_tyro_ns_train_splatfacto_big_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: splatfacto)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 100)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  # --machine.* : device / distributed (DDP) configuration
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  # --logging.* : stats logging and profiling
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  # --viewer.* : interactive web viewer configuration
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  # --pipeline.datamanager.* : data loading / caching / camera sampling
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  # --pipeline.model.* : splatfacto (Gaussian splatting) model hyperparameters
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: random)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.005)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 0.5)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 30)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (default\: 0.0005)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.use-absgrad[Whether to use absgrad to densify gaussians, if False, will use grad rather than absgrad (default\: True)]:pipeline.model.use-absgrad:(True False)"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.01)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 4000)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 15000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.use-scale-regularization[If enabled, a scale regularization introduced in PhysGauss (https\:\/\/xpandora.github.io\/PhysGaussian\/) is used for reducing huge spikey gaussians. (default\: False)]:pipeline.model.use-scale-regularization:(True False)"
  "--pipeline.model.max-gauss-ratio[threshold of ratio of gaussian max to min scale before applying regularization
loss from the PhysGaussian paper (default\: 10.0)]:pipeline.model.max-gauss-ratio:"
  "--pipeline.model.output-depth-during-training[If True, output depth during training. Otherwise, only output depth during evaluation. (default\: False)]:pipeline.model.output-depth-during-training:(True False)"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.use-bilateral-grid[If True, use bilateral grid to handle the ISP changes in the image space. This technique was introduced in the paper \'Bilateral Guided Radiance Field Processing\' (https\:\/\/bilarfpro.github.io\/). (default\: False)]:pipeline.model.use-bilateral-grid:(True False)"
  "--pipeline.model.grid-shape[Shape of the bilateral grid (X, Y, W) (default\: 16 16 8)]:pipeline.model.grid-shape:"
  "--pipeline.model.color-corrected-metrics[If True, apply color correction to the rendered images before computing the metrics. (default\: False)]:pipeline.model.color-corrected-metrics:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: off)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  # --optimizers.* : per-parameter-group optimizer and LR-scheduler settings
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1.6e-06)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.scheduler[(default\: None)]:optimizers.features-dc.scheduler:(None)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler[(default\: None)]:optimizers.features-rest.scheduler:(None)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler[(default\: None)]:optimizers.opacities.scheduler:(None)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.scales.scheduler[(default\: None)]:optimizers.scales.scheduler:(None)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.quats.scheduler[(default\: None)]:optimizers.quats.scheduler:(None)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.0001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 0)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-07)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 1000)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.bilateral-grid.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.bilateral-grid.optimizer.lr:"
  "--optimizers.bilateral-grid.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.bilateral-grid.optimizer.eps:"
  "--optimizers.bilateral-grid.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.bilateral-grid.optimizer.max-norm:"
  "--optimizers.bilateral-grid.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.bilateral-grid.optimizer.weight-decay:"
  "--optimizers.bilateral-grid.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 0)]:optimizers.bilateral-grid.scheduler.lr-pre-warmup:"
  "--optimizers.bilateral-grid.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.bilateral-grid.scheduler.lr-final:"
  "--optimizers.bilateral-grid.scheduler.warmup-steps[Number of warmup steps. (default\: 1000)]:optimizers.bilateral-grid.scheduler.warmup-steps:"
  "--optimizers.bilateral-grid.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.bilateral-grid.scheduler.max-steps:"
  "--optimizers.bilateral-grid.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.bilateral-grid.scheduler.ramp:(linear cosine)"
)

# Dataparser options for `ns-train splatfacto-big arkit-data`
# (ARKitScenes dataset). Same `_arguments` spec format as the arrays above.
_shtab_tyro_ns_train_splatfacto_big_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Dataparser options for `ns-train splatfacto-big blender-data`
# (synthetic Blender/NeRF dataset).
_shtab_tyro_ns_train_splatfacto_big_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Dataparser options for `ns-train splatfacto-big colmap`
# (COLMAP sparse-reconstruction datasets).
_shtab_tyro_ns_train_splatfacto_big_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Dataparser options for `ns-train splatfacto-big dnerf-data`
# (dynamic D-NeRF dataset).
_shtab_tyro_ns_train_splatfacto_big_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Dataparser options for `ns-train splatfacto-big dycheck-data`
# (DyCheck iPhone capture dataset).
_shtab_tyro_ns_train_splatfacto_big_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Dataparser options for `ns-train splatfacto-big instant-ngp-data`
# (Instant-NGP style transforms.json dataset).
_shtab_tyro_ns_train_splatfacto_big_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Dataparser options for `ns-train splatfacto-big minimal-parser`.
# NOTE(review): the default --data path below is a machine-specific absolute
# path baked in at generation time; regenerate the file to update it.
_shtab_tyro_ns_train_splatfacto_big_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Dataparser options for `ns-train splatfacto-big nerfosr-data`
# (NeRF-OSR outdoor scene relighting dataset).
_shtab_tyro_ns_train_splatfacto_big_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto-big nerfstudio-data` dataparser
# flags. Entry format: "--flag[help]:message-tag:completion-action".
# FIX(review): the --train-split-fraction / --eval-interval help text named the
# OPTION ("train-split-fraction", "eval-interval") as an eval_mode value, but
# the valid modes are (fraction filename interval all) — corrected to the real
# mode names "fraction" and "interval". Mirror this fix in the upstream Python
# docstrings, since this file is shtab-generated.
_shtab_tyro_ns_train_splatfacto_big_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto-big nuscenes-data` dataparser
# flags; "_files -/" restricts completion to directories (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto-big phototourism-data`
# dataparser flags (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto-big scannet-data` dataparser
# flags (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto-big scannetpp-data` dataparser
# flags; the doubled "(default: ...)" text comes from the upstream docstrings
# plus shtab's own default annotation (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto-big sdfstudio-data` dataparser
# flags; help text may span multiple physical lines inside one quoted entry
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto-big sitcoms3d-data` dataparser
# flags (shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# zsh _arguments specs for `ns-train splatfacto-big water-data` dataparser
# flags. Entry format: "--flag[help]:message-tag:completion-action".
# FIX(review): --eval-interval help said "when eval_mode is eval-interval",
# but the valid modes are (fraction filename interval all) — corrected to
# "interval". Mirror in the upstream Python docstring (file is shtab-generated).
_shtab_tyro_ns_train_splatfacto_big_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto blender-data` dataparser flags
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto colmap` dataparser flags.
# Entry format: "--flag[help]:message-tag:completion-action".
# FIX(review): --eval-interval help said "when eval_mode is eval-interval",
# but the valid modes are (fraction filename interval all) — corrected to
# "interval". Mirror in the upstream Python docstring (file is shtab-generated).
_shtab_tyro_ns_train_splatfacto_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# zsh _arguments specs for `ns-train splatfacto dnerf-data` dataparser flags
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# zsh _arguments specs for `ns-train splatfacto dycheck-data` dataparser flags
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# zsh _arguments specs for `ns-train splatfacto instant-ngp-data` dataparser
# flags. Entry format: "--flag[help]:message-tag:completion-action".
# FIX(review): help text named the OPTION ("train-split-fraction",
# "eval-interval") as an eval_mode value; valid modes are
# (fraction filename interval all) — corrected to "fraction" / "interval".
# Mirror this fix in the upstream Python docstrings (file is shtab-generated).
_shtab_tyro_ns_train_splatfacto_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# zsh _arguments specs for `ns-train splatfacto minimal-parser`.
# NOTE(review): default path below is a machine-specific absolute path baked in
# at generation time — regenerate on the target machine if it matters.
_shtab_tyro_ns_train_splatfacto_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh _arguments specs for `ns-train splatfacto nerfosr-data` dataparser flags
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto nerfstudio-data` dataparser
# flags. Entry format: "--flag[help]:message-tag:completion-action".
# FIX(review): help text named the OPTION ("train-split-fraction",
# "eval-interval") as an eval_mode value; valid modes are
# (fraction filename interval all) — corrected to "fraction" / "interval".
# Mirror this fix in the upstream Python docstrings (file is shtab-generated).
_shtab_tyro_ns_train_splatfacto_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto nuscenes-data` dataparser
# flags; "_files -/" restricts completion to directories (shtab-generated).
_shtab_tyro_ns_train_splatfacto_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto phototourism-data` dataparser
# flags (shtab-generated).
_shtab_tyro_ns_train_splatfacto_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh _arguments specs for `ns-train splatfacto scannet-data` dataparser flags
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto scannetpp-data` dataparser
# flags; the doubled "(default: ...)" text comes from the upstream docstrings
# plus shtab's own default annotation (shtab-generated).
_shtab_tyro_ns_train_splatfacto_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh _arguments specs for `ns-train splatfacto sdfstudio-data` dataparser
# flags; help text may span multiple physical lines inside one quoted entry
# (shtab-generated).
_shtab_tyro_ns_train_splatfacto_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# shtab-generated completion specs for `ns-train splatfacto sitcoms3d-data`.
# Note the multi-line help text inside one quoted element (--scene-scale): the
# embedded newline is part of the spec string and must be kept intact.
_shtab_tyro_ns_train_splatfacto_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Completion specs for `ns-train splatfacto-w` (an external method): only the
# help flag is exposed here, since the method's own options are unavailable
# until the external package is installed.
_shtab_tyro_ns_train_splatfacto_w_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# shtab-generated completion specs for `ns-train splatfacto water-data`
# (colmap-style dataparser plus semantics options). Backslash escapes
# (\:, \/, \', \+, \<, \=) protect characters that are special inside
# zsh completion spec strings; several help texts span multiple lines
# within a single quoted element.
_shtab_tyro_ns_train_splatfacto_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# shtab-generated completion specs for `ns-train tensorf`: the full trainer
# option surface for the TensoRF method. Rough grouping, in order of
# appearance: top-level trainer/output flags, --machine.* (DDP/device),
# --logging.* (writers/profiler), --viewer.*, --pipeline.datamanager.*
# (incl. pixel-sampler), --pipeline.model.* (TensoRF-specific fields such as
# tensorf-encoding and loss coefficients), and --optimizers.* per param group
# (fields, encodings, camera-opt). Actions: `_files` completes paths
# (`_files -/` directories only), `(a b c)` offers literal choices, and an
# empty action accepts free-form input.
_shtab_tyro_ns_train_tensorf_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: tensorf)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 30000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 4096)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 4096)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.num-processes[Number of processes to use for train data loading. More than 1 doesn\'t result in that much better performance (default\: 1)]:pipeline.datamanager.num-processes:"
  "--pipeline.datamanager.queue-size[Size of shared data queue containing generated ray bundles and batches.
If queue_size \<\= 0, the queue size is infinite. (default\: 2)]:pipeline.datamanager.queue-size:"
  "--pipeline.datamanager.max-thread-workers[Maximum number of threads to use in thread pool executor. If None, use ThreadPool default. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.init-resolution[initial render resolution (default\: 128)]:pipeline.model.init-resolution:"
  "--pipeline.model.final-resolution[final render resolution (default\: 300)]:pipeline.model.final-resolution:"
  "--pipeline.model.upsampling-iters[specifies a list of iteration step numbers to perform upsampling (default\: 2000 3000 4000 5500 7000)]:pipeline.model.upsampling-iters:"
  "--pipeline.model.num-samples[Number of samples in field evaluation (default\: 50)]:pipeline.model.num-samples:"
  "--pipeline.model.num-uniform-samples[Number of samples in density evaluation (default\: 200)]:pipeline.model.num-uniform-samples:"
  "--pipeline.model.num-den-components[Number of components in density encoding (default\: 16)]:pipeline.model.num-den-components:"
  "--pipeline.model.num-color-components[Number of components in color encoding (default\: 48)]:pipeline.model.num-color-components:"
  "--pipeline.model.appearance-dim[Number of channels for color encoding (default\: 27)]:pipeline.model.appearance-dim:"
  "--pipeline.model.tensorf-encoding[(default\: vm)]:pipeline.model.tensorf-encoding:(triplane vm cp)"
  "--pipeline.model.regularization[Regularization method used in tensorf paper (default\: tv)]:pipeline.model.regularization:(none l1 tv)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: white)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.loss-coefficients.rgb-loss[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss:"
  "--pipeline.model.loss-coefficients.tv-reg-density[(default\: 0.001)]:pipeline.model.loss-coefficients.tv-reg-density:"
  "--pipeline.model.loss-coefficients.tv-reg-color[(default\: 0.0001)]:pipeline.model.loss-coefficients.tv-reg-color:"
  "--pipeline.model.loss-coefficients.l1-reg[(default\: 0.0005)]:pipeline.model.loss-coefficients.l1-reg:"
  "--pipeline.model.camera-optimizer.mode[Pose optimization strategy to use. If enabled, we recommend SO3xR3. (default\: off)]:pipeline.model.camera-optimizer.mode:(off SO3xR3 SE3)"
  "--pipeline.model.camera-optimizer.trans-l2-penalty[L2 penalty on translation parameters. (default\: 0.01)]:pipeline.model.camera-optimizer.trans-l2-penalty:"
  "--pipeline.model.camera-optimizer.rot-l2-penalty[L2 penalty on rotation parameters. (default\: 0.001)]:pipeline.model.camera-optimizer.rot-l2-penalty:"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.fields.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.fields.scheduler.lr-pre-warmup:"
  "--optimizers.fields.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0001)]:optimizers.fields.scheduler.lr-final:"
  "--optimizers.fields.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.fields.scheduler.warmup-steps:"
  "--optimizers.fields.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.fields.scheduler.max-steps:"
  "--optimizers.fields.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.fields.scheduler.ramp:(linear cosine)"
  "--optimizers.encodings.optimizer.lr[The learning rate to use. (default\: 0.02)]:optimizers.encodings.optimizer.lr:"
  "--optimizers.encodings.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.encodings.optimizer.eps:"
  "--optimizers.encodings.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.encodings.optimizer.max-norm:"
  "--optimizers.encodings.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.encodings.optimizer.weight-decay:"
  "--optimizers.encodings.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.encodings.scheduler.lr-pre-warmup:"
  "--optimizers.encodings.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.002)]:optimizers.encodings.scheduler.lr-final:"
  "--optimizers.encodings.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.encodings.scheduler.warmup-steps:"
  "--optimizers.encodings.scheduler.max-steps[The maximum number of steps. (default\: 30000)]:optimizers.encodings.scheduler.max-steps:"
  "--optimizers.encodings.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.encodings.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.0001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 1e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 5000)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
)

# shtab-generated completion specs for `ns-train tensorf arkit-data`
# (ARKitScenes dataparser options).
_shtab_tyro_ns_train_tensorf_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# shtab-generated completion specs for `ns-train tensorf blender-data`
# (synthetic Blender dataset dataparser options).
_shtab_tyro_ns_train_tensorf_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# shtab-generated completion specs for `ns-train tensorf colmap`
# (COLMAP reconstruction dataparser options). Same dataparser flag set as the
# other colmap-based subcommands in this file, minus the semantics options.
_shtab_tyro_ns_train_tensorf_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# shtab-generated completion specs for `ns-train tensorf dnerf-data`
# (D-NeRF dataset dataparser options).
_shtab_tyro_ns_train_tensorf_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# shtab-generated completion specs for `ns-train tensorf dycheck-data`
# (DyCheck iPhone dataset dataparser options).
_shtab_tyro_ns_train_tensorf_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# shtab-generated completion specs for `ns-train tensorf instant-ngp-data`.
# NOTE(review): the --train-split-fraction help text says eval_mode
# "train-split-fraction", but the offered eval modes are (fraction filename
# interval all) — the wording comes from the upstream docstring fed to shtab,
# so it is preserved verbatim here; fix it upstream, not in this generated file.
_shtab_tyro_ns_train_tensorf_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# shtab-generated completion specs for `ns-train tensorf minimal-parser`.
# NOTE(review): the default path embeds the generating machine's home
# directory (\/home\/nikhil\/...); it is only display text in the help
# bracket, but regenerating on a clean environment would remove it.
_shtab_tyro_ns_train_tensorf_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# zsh `_arguments` option specs for `ns-train tensorf nerfosr-data`
# (auto-generated by shtab). Boolean flags complete to the literal
# Python values (True False); a trailing `_files` action completes paths.
_shtab_tyro_ns_train_tensorf_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh `_arguments` option specs for `ns-train tensorf nerfstudio-data`
# (auto-generated by shtab). Entry format:
# "--flag[help text]:completion-tag:completion-action"; help text for
# --eval-mode spans multiple physical lines inside one quoted entry.
_shtab_tyro_ns_train_tensorf_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# zsh `_arguments` option specs for `ns-train tensorf nuscenes-data`
# (auto-generated by shtab). `_files -/` completes directories only.
_shtab_tyro_ns_train_tensorf_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# zsh `_arguments` option specs for `ns-train tensorf phototourism-data`
# (auto-generated by shtab).
_shtab_tyro_ns_train_tensorf_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# zsh `_arguments` option specs for `ns-train tensorf scannet-data`
# (auto-generated by shtab).
_shtab_tyro_ns_train_tensorf_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# zsh `_arguments` option specs for `ns-train tensorf scannetpp-data`
# (auto-generated by shtab). `_files -/` completes directories only.
_shtab_tyro_ns_train_tensorf_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# zsh `_arguments` option specs for `ns-train tensorf sdfstudio-data`
# (auto-generated by shtab). The --scene-scale help text spans two
# physical lines inside a single quoted entry.
_shtab_tyro_ns_train_tensorf_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# zsh `_arguments` option specs for `ns-train tensorf sitcoms3d-data`
# (auto-generated by shtab).
_shtab_tyro_ns_train_tensorf_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# zsh `_arguments` option specs for `ns-train tensorf water-data`
# (auto-generated by shtab). Several help texts span multiple physical
# lines inside one quoted entry; the escaped `\:`, `\/`, `\<` etc. keep
# shtab's help strings from being parsed as _arguments metacharacters.
_shtab_tyro_ns_train_tensorf_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# zsh `_arguments` option specs for `ns-train tetra-nerf` (auto-generated
# by shtab). Only the help flag is declared here.
_shtab_tyro_ns_train_tetra_nerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# zsh `_arguments` option specs for `ns-train tetra-nerf-original`
# (auto-generated by shtab). Only the help flag is declared here.
_shtab_tyro_ns_train_tetra_nerf_original_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

# zsh `_arguments` option specs for the `ns-train vanilla-nerf` method
# (auto-generated by shtab). Covers trainer-level flags (output/checkpoint
# paths, iteration counts), machine/logging/viewer config groups, the
# pipeline datamanager + pixel-sampler + model options, and the two
# optimizer groups (fields, temporal-distortion). Entry format:
# "--flag[help text]:completion-tag:completion-action"; parenthesized
# actions list literal candidate values, `_files` / `_files -/` complete
# files / directories.
_shtab_tyro_ns_train_vanilla_nerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: vanilla-nerf)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: wandb)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 1000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 500)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 500)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 25000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 1000000)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.train-num-rays-per-batch[Number of rays per batch to use per training iteration. (default\: 1024)]:pipeline.datamanager.train-num-rays-per-batch:"
  "--pipeline.datamanager.train-num-images-to-sample-from[Number of images to sample during training iteration. (default\: -1)]:pipeline.datamanager.train-num-images-to-sample-from:"
  "--pipeline.datamanager.train-num-times-to-repeat-images[When not training on all images, number of iterations before picking new
images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.train-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-num-rays-per-batch[Number of rays per batch to use per eval iteration. (default\: 1024)]:pipeline.datamanager.eval-num-rays-per-batch:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.patch-size[Size of patch to sample from. If \> 1, patch-based sampling will be used. (default\: 1)]:pipeline.datamanager.patch-size:"
  "--pipeline.datamanager.pixel-sampler.num-rays-per-batch[Number of rays to sample per batch. (default\: 4096)]:pipeline.datamanager.pixel-sampler.num-rays-per-batch:"
  "--pipeline.datamanager.pixel-sampler.keep-full-image[Whether or not to include a reference to the full image in returned batch. (default\: False)]:pipeline.datamanager.pixel-sampler.keep-full-image:(True False)"
  "--pipeline.datamanager.pixel-sampler.is-equirectangular[List of whether or not camera i is equirectangular. (default\: False)]:pipeline.datamanager.pixel-sampler.is-equirectangular:(True False)"
  "--pipeline.datamanager.pixel-sampler.ignore-mask[Whether to ignore the masks when sampling. (default\: False)]:pipeline.datamanager.pixel-sampler.ignore-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.fisheye-crop-radius[Set to the radius (in pixels) for fisheye cameras. (default\: None)]:pipeline.datamanager.pixel-sampler.fisheye-crop-radius:"
  "--pipeline.datamanager.pixel-sampler.rejection-sample-mask[Whether or not to use rejection sampling when sampling images with masks (default\: True)]:pipeline.datamanager.pixel-sampler.rejection-sample-mask:(True False)"
  "--pipeline.datamanager.pixel-sampler.max-num-iterations[If rejection sampling masks, the maximum number of times to sample (default\: 100)]:pipeline.datamanager.pixel-sampler.max-num-iterations:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-coarse-samples[Number of samples in coarse field evaluation (default\: 64)]:pipeline.model.num-coarse-samples:"
  "--pipeline.model.num-importance-samples[Number of samples in fine field evaluation (default\: 128)]:pipeline.model.num-importance-samples:"
  "--pipeline.model.enable-temporal-distortion[Specifies whether or not to include ray warping based on time. (default\: False)]:pipeline.model.enable-temporal-distortion:(True False)"
  "--pipeline.model.use-gradient-scaling[Use gradient scaler where the gradients are lower for points closer to the camera. (default\: False)]:pipeline.model.use-gradient-scaling:(True False)"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: white)]:pipeline.model.background-color:(random last_sample black white)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--pipeline.model.temporal-distortion-params.kind[(default\: DNERF)]:pipeline.model.temporal-distortion-params.kind:(DNERF)"
  "--optimizers.fields.scheduler[(default\: None)]:optimizers.fields.scheduler:(None)"
  "--optimizers.fields.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.fields.optimizer.lr:"
  "--optimizers.fields.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.fields.optimizer.eps:"
  "--optimizers.fields.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.fields.optimizer.max-norm:"
  "--optimizers.fields.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.fields.optimizer.weight-decay:"
  "--optimizers.temporal-distortion.scheduler[(default\: None)]:optimizers.temporal-distortion.scheduler:(None)"
  "--optimizers.temporal-distortion.optimizer.lr[The learning rate to use. (default\: 0.0005)]:optimizers.temporal-distortion.optimizer.lr:"
  "--optimizers.temporal-distortion.optimizer.eps[The epsilon value to use. (default\: 1e-08)]:optimizers.temporal-distortion.optimizer.eps:"
  "--optimizers.temporal-distortion.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.temporal-distortion.optimizer.max-norm:"
  "--optimizers.temporal-distortion.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.temporal-distortion.optimizer.weight-decay:"
)

# zsh `_arguments` option specs for `ns-train vanilla-nerf arkit-data`
# (auto-generated by shtab).
_shtab_tyro_ns_train_vanilla_nerf_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# zsh `_arguments` option specs for `ns-train vanilla-nerf blender-data`
# (auto-generated by shtab). The --alpha-color and --ply-path help texts
# each span two physical lines inside a single quoted entry.
_shtab_tyro_ns_train_vanilla_nerf_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for `ns-train vanilla-nerf colmap` (COLMAP reconstruction dataparser).
# Spec format: "--flag[help]:tyro-field:action"; `(a b c)` actions offer a fixed
# choice list, `_files` completes paths, and an empty action accepts free text.
_shtab_tyro_ns_train_vanilla_nerf_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for `ns-train vanilla-nerf dnerf-data` (D-NeRF dynamic-scene dataparser).
_shtab_tyro_ns_train_vanilla_nerf_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for `ns-train vanilla-nerf dycheck-data` (DyCheck iPhone dataset dataparser).
_shtab_tyro_ns_train_vanilla_nerf_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for `ns-train vanilla-nerf instant-ngp-data` (Instant-NGP format dataparser).
_shtab_tyro_ns_train_vanilla_nerf_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for `ns-train vanilla-nerf minimal-parser` (minimal test dataparser).
# NOTE(review): the default --data path embeds a developer's home directory
# (/home/nikhil/...), an artifact of where this file was generated.
_shtab_tyro_ns_train_vanilla_nerf_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train vanilla-nerf nerfosr-data` (NeRF-OSR outdoor scene dataparser).
_shtab_tyro_ns_train_vanilla_nerf_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train vanilla-nerf nerfstudio-data` (native nerfstudio dataparser).
_shtab_tyro_ns_train_vanilla_nerf_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: False)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for `ns-train vanilla-nerf nuscenes-data` (NuScenes driving dataset
# dataparser). `_files -/` restricts completion to directories.
_shtab_tyro_ns_train_vanilla_nerf_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for `ns-train vanilla-nerf phototourism-data` (Phototourism dataparser).
_shtab_tyro_ns_train_vanilla_nerf_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train vanilla-nerf scannet-data` (ScanNet dataparser).
_shtab_tyro_ns_train_vanilla_nerf_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for `ns-train vanilla-nerf scannetpp-data` (ScanNet++ dataparser).
# `_files -/` restricts completion to directories.
_shtab_tyro_ns_train_vanilla_nerf_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for `ns-train vanilla-nerf sdfstudio-data` (SDFStudio dataparser).
_shtab_tyro_ns_train_vanilla_nerf_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train vanilla-nerf sitcoms3d-data` (Sitcoms3D dataparser).
_shtab_tyro_ns_train_vanilla_nerf_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for `ns-train vanilla-nerf water-data` (water/underwater dataparser;
# a superset of the colmap specs plus semantics-label options).
_shtab_tyro_ns_train_vanilla_nerf_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for `ns-train volinga`. Only -h/--help is generated here; the
# method's full flags are presumably emitted elsewhere once the external
# package is installed — TODO confirm against shtab output for externals.
_shtab_tyro_ns_train_volinga_options=(
  {-h,--help}"[show this help message and exit]:help:"
)

_shtab_tyro_ns_train_water_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: water)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 1000)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 15001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: False)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-steps[Number of steps to train the model (default\: 15001)]:pipeline.model.num-steps:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.5)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-alpha-thresh-post[threshold of opacity for post culling gaussians (default\: 0.1)]:pipeline.model.cull-alpha-thresh-post:"
  "--pipeline.model.reset-alpha-thresh[threshold of opacity for resetting alpha (default\: 0.5)]:pipeline.model.reset-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 10.0)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.continue-cull-post-densification[If True, continue to cull gaussians post refinement (default\: True)]:pipeline.model.continue-cull-post-densification:(True False)"
  "--pipeline.model.zero-medium[If True, zero out the medium field (default\: False)]:pipeline.model.zero-medium:(True False)"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 5)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.abs-grad-densification[If True, use absolute gradient for densification (default\: True)]:pipeline.model.abs-grad-densification:(True False)"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (0.0004, 0.0008) (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.001)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.clip-thresh[minimum depth threshold (default\: 0.01)]:pipeline.model.clip-thresh:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 0)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.main-loss[main loss to use (default\: reg_l1)]:pipeline.model.main-loss:(l1 reg_l1 reg_l2)"
  "--pipeline.model.ssim-loss[ssim loss to use (default\: reg_ssim)]:pipeline.model.ssim-loss:(reg_ssim ssim)"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 10000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 128)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.mlp-type[Type of MLP to use for medium MLP. (default\: tcnn)]:pipeline.model.mlp-type:(tcnn torch)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-dc.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-dc.scheduler.lr-pre-warmup:"
  "--optimizers.features-dc.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0025)]:optimizers.features-dc.scheduler.lr-final:"
  "--optimizers.features-dc.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-dc.scheduler.warmup-steps:"
  "--optimizers.features-dc.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-dc.scheduler.max-steps:"
  "--optimizers.features-dc.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-dc.scheduler.ramp:(linear cosine)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-rest.scheduler.lr-pre-warmup:"
  "--optimizers.features-rest.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.000125)]:optimizers.features-rest.scheduler.lr-final:"
  "--optimizers.features-rest.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-rest.scheduler.warmup-steps:"
  "--optimizers.features-rest.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-rest.scheduler.max-steps:"
  "--optimizers.features-rest.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-rest.scheduler.ramp:(linear cosine)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.opacities.scheduler.lr-pre-warmup:"
  "--optimizers.opacities.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.05)]:optimizers.opacities.scheduler.lr-final:"
  "--optimizers.opacities.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.opacities.scheduler.warmup-steps:"
  "--optimizers.opacities.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.opacities.scheduler.max-steps:"
  "--optimizers.opacities.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.opacities.scheduler.ramp:(linear cosine)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.scales.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.scales.scheduler.lr-pre-warmup:"
  "--optimizers.scales.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.005)]:optimizers.scales.scheduler.lr-final:"
  "--optimizers.scales.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.scales.scheduler.warmup-steps:"
  "--optimizers.scales.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.scales.scheduler.max-steps:"
  "--optimizers.scales.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.scales.scheduler.ramp:(linear cosine)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.quats.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.quats.scheduler.lr-pre-warmup:"
  "--optimizers.quats.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.001)]:optimizers.quats.scheduler.lr-final:"
  "--optimizers.quats.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.quats.scheduler.warmup-steps:"
  "--optimizers.quats.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.quats.scheduler.max-steps:"
  "--optimizers.quats.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.quats.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-feature-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.medium-feature-dc.optimizer.lr:"
  "--optimizers.medium-feature-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-dc.optimizer.eps:"
  "--optimizers.medium-feature-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-dc.optimizer.max-norm:"
  "--optimizers.medium-feature-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-dc.optimizer.weight-decay:"
  "--optimizers.medium-feature-dc.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-feature-dc.scheduler.lr-pre-warmup:"
  "--optimizers.medium-feature-dc.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0025)]:optimizers.medium-feature-dc.scheduler.lr-final:"
  "--optimizers.medium-feature-dc.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-feature-dc.scheduler.warmup-steps:"
  "--optimizers.medium-feature-dc.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-feature-dc.scheduler.max-steps:"
  "--optimizers.medium-feature-dc.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-feature-dc.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-feature-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.medium-feature-rest.optimizer.lr:"
  "--optimizers.medium-feature-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-rest.optimizer.eps:"
  "--optimizers.medium-feature-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-rest.optimizer.max-norm:"
  "--optimizers.medium-feature-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-rest.optimizer.weight-decay:"
  "--optimizers.medium-feature-rest.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-feature-rest.scheduler.lr-pre-warmup:"
  "--optimizers.medium-feature-rest.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.000125)]:optimizers.medium-feature-rest.scheduler.lr-final:"
  "--optimizers.medium-feature-rest.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-feature-rest.scheduler.warmup-steps:"
  "--optimizers.medium-feature-rest.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-feature-rest.scheduler.max-steps:"
  "--optimizers.medium-feature-rest.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-feature-rest.scheduler.ramp:(linear cosine)"
)

# Option specs for the `ns-train water arkit-data` dataparser subcommand.
# AUTO-GENERATED by shtab from the tyro CLI definition — do not hand-edit;
# regenerate the completion file instead.
#
# Each element is a zsh `_arguments` spec of the form
#   "--flag[description]:message:action"
# where the trailing action is `_files` (complete file paths), a literal
# choice list like `(poses focus none)`, or empty (free-form value).
# Special characters in descriptions (':', '/', '+', quotes) are
# backslash-escaped so _arguments parses the spec correctly.
_shtab_tyro_ns_train_water_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

_shtab_tyro_ns_train_water_big_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: water-big)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 100)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 15001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-steps[Number of steps to train the model (default\: 15001)]:pipeline.model.num-steps:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.5)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-alpha-thresh-post[threshold of opacity for post culling gaussians (default\: 0.1)]:pipeline.model.cull-alpha-thresh-post:"
  "--pipeline.model.reset-alpha-thresh[threshold of opacity for resetting alpha (default\: 0.5)]:pipeline.model.reset-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 10.0)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.continue-cull-post-densification[If True, continue to cull gaussians post refinement (default\: False)]:pipeline.model.continue-cull-post-densification:(True False)"
  "--pipeline.model.zero-medium[If True, zero out the medium field (default\: False)]:pipeline.model.zero-medium:(True False)"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 5)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.abs-grad-densification[If True, use absolute gradient for densification (default\: True)]:pipeline.model.abs-grad-densification:(True False)"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (0.0004, 0.0008) (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.001)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.clip-thresh[minimum depth threshold (default\: 0.01)]:pipeline.model.clip-thresh:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 0)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.main-loss[main loss to use (default\: reg_l1)]:pipeline.model.main-loss:(l1 reg_l1 reg_l2)"
  "--pipeline.model.ssim-loss[ssim loss to use (default\: reg_ssim)]:pipeline.model.ssim-loss:(reg_ssim ssim)"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 10000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 128)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.mlp-type[Type of MLP to use for medium MLP. (default\: tcnn)]:pipeline.model.mlp-type:(tcnn torch)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.scheduler[(default\: None)]:optimizers.features-dc.scheduler:(None)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler[(default\: None)]:optimizers.features-rest.scheduler:(None)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler[(default\: None)]:optimizers.opacities.scheduler:(None)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.scales.scheduler[(default\: None)]:optimizers.scales.scheduler:(None)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.quats.scheduler[(default\: None)]:optimizers.quats.scheduler:(None)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-feature-dc.scheduler[(default\: None)]:optimizers.medium-feature-dc.scheduler:(None)"
  "--optimizers.medium-feature-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.medium-feature-dc.optimizer.lr:"
  "--optimizers.medium-feature-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-dc.optimizer.eps:"
  "--optimizers.medium-feature-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-dc.optimizer.max-norm:"
  "--optimizers.medium-feature-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-dc.optimizer.weight-decay:"
  "--optimizers.medium-feature-rest.scheduler[(default\: None)]:optimizers.medium-feature-rest.scheduler:(None)"
  "--optimizers.medium-feature-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.medium-feature-rest.optimizer.lr:"
  "--optimizers.medium-feature-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-rest.optimizer.eps:"
  "--optimizers.medium-feature-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-rest.optimizer.max-norm:"
  "--optimizers.medium-feature-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-rest.optimizer.weight-decay:"
)

# _arguments specs for the 'arkit-data' dataparser of 'ns-train water-big'.
# Each entry has the form "--flag[description]:message:action"; ':' inside the
# description is escaped as '\:' so zsh does not treat it as a field separator.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# _arguments specs for the 'blender-data' dataparser of 'ns-train water-big'.
# Entries may span multiple physical lines: the embedded newline inside the
# double-quoted spec is part of the help text shown by the completer.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# _arguments specs for the 'colmap' dataparser of 'ns-train water-big'.
# Path-valued flags complete with the '_files' action; enum flags list their
# literal choices in a trailing "(...)" action group.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# _arguments specs for the 'dnerf-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# _arguments specs for the 'dycheck-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# _arguments specs for the 'instant-ngp-data' dataparser of 'ns-train water-big'.
# NOTE(review): the --train-split-fraction help text says "eval_mode is
# train-split-fraction", which looks like an upstream docstring typo (the
# matching eval-mode choice is 'fraction'); help strings here are generated
# output, so fix it in the upstream Python docstring and regenerate.
_shtab_tyro_ns_train_water_big_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# _arguments specs for the 'minimal-parser' dataparser of 'ns-train water-big'.
# The default path below is a machine-specific absolute path baked in at
# generation time; it only affects the displayed help text, not completion.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments specs for the 'nerfosr-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for the 'nerfstudio-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments specs for the 'nuscenes-data' dataparser of 'ns-train water-big'.
# Directory-valued flags use the '_files -/' action so only directories
# are offered as completions.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments specs for the 'phototourism-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments specs for the 'scannet-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments specs for the 'scannetpp-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments specs for the 'sdfstudio-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments specs for the 'sitcoms3d-data' dataparser of 'ns-train water-big'.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments specs for the 'water-data' dataparser of 'ns-train water-big'.
# Mirrors the colmap dataparser options above plus the semantics-related
# flags (--include-semantics, --label-path) specific to this dataparser.
# NOTE(review): auto-generated by shtab -- regenerate rather than hand-edit.
_shtab_tyro_ns_train_water_big_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments option specs for the `blender-data` dataparser of the
# `ns-train water` method. Generated by shtab from the tyro CLI schema.
# Each element is: "--flag[description]:completion-tag:completion-action"
# (an empty action means free-form input; `_files` completes paths).
# NOTE(review): file is auto-generated — regenerate via shtab rather than
# hand-editing option strings.
_shtab_tyro_ns_train_water_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# _arguments option specs for the `colmap` dataparser of the `ns-train water`
# method. Generated by shtab from the tyro CLI schema.
# Each element is: "--flag[description]:completion-tag:completion-action";
# a parenthesized action like `(floor round ceil)` offers a fixed choice list,
# `_files` completes filesystem paths, and an empty action accepts free text.
# NOTE(review): file is auto-generated — regenerate via shtab rather than
# hand-editing option strings.
_shtab_tyro_ns_train_water_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# _arguments option specs for the `dnerf-data` dataparser of the
# `ns-train water` method (D-NeRF synthetic dataset options).
# Generated by shtab from the tyro CLI schema; element format is
# "--flag[description]:completion-tag:completion-action".
# NOTE(review): file is auto-generated — regenerate via shtab rather than
# hand-editing option strings.
_shtab_tyro_ns_train_water_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# _arguments option specs for the `dycheck-data` dataparser of the
# `ns-train water` method (DyCheck iPhone dataset options).
# Generated by shtab from the tyro CLI schema; element format is
# "--flag[description]:completion-tag:completion-action".
# NOTE(review): file is auto-generated — regenerate via shtab rather than
# hand-editing option strings.
_shtab_tyro_ns_train_water_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

_shtab_tyro_ns_train_water_ex_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: water-ex)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 1000)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 15001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: False)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-steps[Number of steps to train the model (default\: 15001)]:pipeline.model.num-steps:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.5)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-alpha-thresh-post[threshold of opacity for post culling gaussians (default\: 0.1)]:pipeline.model.cull-alpha-thresh-post:"
  "--pipeline.model.reset-alpha-thresh[threshold of opacity for resetting alpha (default\: 0.5)]:pipeline.model.reset-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 10.0)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.continue-cull-post-densification[If True, continue to cull gaussians post refinement (default\: True)]:pipeline.model.continue-cull-post-densification:(True False)"
  "--pipeline.model.zero-medium[If True, zero out the medium field (default\: False)]:pipeline.model.zero-medium:(True False)"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 5)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.abs-grad-densification[If True, use absolute gradient for densification (default\: True)]:pipeline.model.abs-grad-densification:(True False)"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (0.0004, 0.0008) (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.001)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.clip-thresh[minimum depth threshold (default\: 0.01)]:pipeline.model.clip-thresh:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 0)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.main-loss[main loss to use (default\: reg_l1)]:pipeline.model.main-loss:(l1 reg_l1 reg_l2)"
  "--pipeline.model.ssim-loss[ssim loss to use (default\: reg_ssim)]:pipeline.model.ssim-loss:(reg_ssim ssim)"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 10000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: antialiased)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.medium-sh-degree[degree of the spherical harmonics to use for the medium field (default\: 3)]:pipeline.model.medium-sh-degree:"
  "--pipeline.model.use-scale-regularization[If enabled, a scale regularization introduced in PhysGauss (https\:\/\/xpandora.github.io\/PhysGaussian\/) is used for reducing huge spikey gaussians. (default\: False)]:pipeline.model.use-scale-regularization:(True False)"
  "--pipeline.model.max-gauss-ratio[threshold of ratio of gaussian max to min scale before applying regularization
loss from the PhysGaussian paper (default\: 10.0)]:pipeline.model.max-gauss-ratio:"
  "--pipeline.model.with-medium-kernel[If enabled, a forward scatter kernel (default\: True)]:pipeline.model.with-medium-kernel:(True False)"
  "--pipeline.model.kernel-start-step[Train the kernel when the Guassian stable (default\: 6000)]:pipeline.model.kernel-start-step:"
  "--pipeline.model.depth-aware-loss[A depth weigth loss (default\: False)]:pipeline.model.depth-aware-loss:(True False)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-dc.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-dc.scheduler.lr-pre-warmup:"
  "--optimizers.features-dc.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0025)]:optimizers.features-dc.scheduler.lr-final:"
  "--optimizers.features-dc.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-dc.scheduler.warmup-steps:"
  "--optimizers.features-dc.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-dc.scheduler.max-steps:"
  "--optimizers.features-dc.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-dc.scheduler.ramp:(linear cosine)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-rest.scheduler.lr-pre-warmup:"
  "--optimizers.features-rest.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.000125)]:optimizers.features-rest.scheduler.lr-final:"
  "--optimizers.features-rest.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-rest.scheduler.warmup-steps:"
  "--optimizers.features-rest.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-rest.scheduler.max-steps:"
  "--optimizers.features-rest.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-rest.scheduler.ramp:(linear cosine)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.opacities.scheduler.lr-pre-warmup:"
  "--optimizers.opacities.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.05)]:optimizers.opacities.scheduler.lr-final:"
  "--optimizers.opacities.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.opacities.scheduler.warmup-steps:"
  "--optimizers.opacities.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.opacities.scheduler.max-steps:"
  "--optimizers.opacities.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.opacities.scheduler.ramp:(linear cosine)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.scales.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.scales.scheduler.lr-pre-warmup:"
  "--optimizers.scales.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.005)]:optimizers.scales.scheduler.lr-final:"
  "--optimizers.scales.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.scales.scheduler.warmup-steps:"
  "--optimizers.scales.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.scales.scheduler.max-steps:"
  "--optimizers.scales.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.scales.scheduler.ramp:(linear cosine)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.quats.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.quats.scheduler.lr-pre-warmup:"
  "--optimizers.quats.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.001)]:optimizers.quats.scheduler.lr-final:"
  "--optimizers.quats.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.quats.scheduler.warmup-steps:"
  "--optimizers.quats.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.quats.scheduler.max-steps:"
  "--optimizers.quats.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.quats.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-feature-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.medium-feature-dc.optimizer.lr:"
  "--optimizers.medium-feature-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-dc.optimizer.eps:"
  "--optimizers.medium-feature-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-dc.optimizer.max-norm:"
  "--optimizers.medium-feature-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-dc.optimizer.weight-decay:"
  "--optimizers.medium-feature-dc.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-feature-dc.scheduler.lr-pre-warmup:"
  "--optimizers.medium-feature-dc.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0025)]:optimizers.medium-feature-dc.scheduler.lr-final:"
  "--optimizers.medium-feature-dc.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-feature-dc.scheduler.warmup-steps:"
  "--optimizers.medium-feature-dc.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-feature-dc.scheduler.max-steps:"
  "--optimizers.medium-feature-dc.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-feature-dc.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-feature-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.medium-feature-rest.optimizer.lr:"
  "--optimizers.medium-feature-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-feature-rest.optimizer.eps:"
  "--optimizers.medium-feature-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-feature-rest.optimizer.max-norm:"
  "--optimizers.medium-feature-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-feature-rest.optimizer.weight-decay:"
  "--optimizers.medium-feature-rest.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-feature-rest.scheduler.lr-pre-warmup:"
  "--optimizers.medium-feature-rest.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.000125)]:optimizers.medium-feature-rest.scheduler.lr-final:"
  "--optimizers.medium-feature-rest.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-feature-rest.scheduler.warmup-steps:"
  "--optimizers.medium-feature-rest.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-feature-rest.scheduler.max-steps:"
  "--optimizers.medium-feature-rest.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-feature-rest.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-kernel.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.medium-kernel.optimizer.lr:"
  "--optimizers.medium-kernel.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-kernel.optimizer.eps:"
  "--optimizers.medium-kernel.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.medium-kernel.optimizer.max-norm:"
  "--optimizers.medium-kernel.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-kernel.optimizer.weight-decay:"
  "--optimizers.medium-kernel.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-kernel.scheduler.lr-pre-warmup:"
  "--optimizers.medium-kernel.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.001)]:optimizers.medium-kernel.scheduler.lr-final:"
  "--optimizers.medium-kernel.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-kernel.scheduler.warmup-steps:"
  "--optimizers.medium-kernel.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-kernel.scheduler.max-steps:"
  "--optimizers.medium-kernel.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-kernel.scheduler.ramp:(linear cosine)"
)

# Option specs for `ns-train water-ex arkit-data` (ARKitScenes dataparser).
# shtab-generated _arguments entries: "--flag[help]:message-tag:completion-action".
# Boolean flags complete to (True False); path flags use _files.
_shtab_tyro_ns_train_water_ex_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Option specs for `ns-train water-ex blender-data` (Blender synthetic dataparser).
# Note: some help strings span multiple physical lines inside one quoted element.
_shtab_tyro_ns_train_water_ex_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for `ns-train water-ex colmap` (COLMAP reconstruction dataparser).
# Enumerated flags list their allowed values in the trailing (...) action;
# multi-line help strings are single quoted array elements.
_shtab_tyro_ns_train_water_ex_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for `ns-train water-ex dnerf-data` (D-NeRF dataparser).
_shtab_tyro_ns_train_water_ex_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for `ns-train water-ex dycheck-data` (DyCheck iPhone dataparser).
_shtab_tyro_ns_train_water_ex_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for `ns-train water-ex instant-ngp-data` (Instant-NGP dataparser).
_shtab_tyro_ns_train_water_ex_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for `ns-train water-ex minimal-parser` (minimal test dataparser).
# NOTE(review): the default below embeds an absolute path from the generating
# machine — an artifact of shtab generation; regenerate to update it.
_shtab_tyro_ns_train_water_ex_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train water-ex nerfosr-data` (NeRF-OSR dataparser).
_shtab_tyro_ns_train_water_ex_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train water-ex nerfstudio-data` (Nerfstudio dataparser).
_shtab_tyro_ns_train_water_ex_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for `ns-train water-ex nuscenes-data` (NuScenes dataparser).
# Directory-valued flags use `_files -/` to restrict completion to directories.
_shtab_tyro_ns_train_water_ex_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for `ns-train water-ex phototourism-data` (Phototourism dataparser).
_shtab_tyro_ns_train_water_ex_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for `ns-train water-ex scannet-data` (ScanNet dataparser).
_shtab_tyro_ns_train_water_ex_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for `ns-train water-ex scannetpp-data` (ScanNet++ dataparser).
_shtab_tyro_ns_train_water_ex_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for `ns-train water-ex sdfstudio-data` (SDFStudio dataparser).
_shtab_tyro_ns_train_water_ex_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for `ns-train water-ex sitcoms3d-data` (Sitcoms3D dataparser).
_shtab_tyro_ns_train_water_ex_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for `ns-train water-ex water-data` — a COLMAP-style dataparser
# extended with semantic-label options (--include-semantics, --label-path).
_shtab_tyro_ns_train_water_ex_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for `ns-train water instant-ngp-data` (Instant-NGP dataparser;
# mirrors the water-ex variant above).
_shtab_tyro_ns_train_water_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for `ns-train water minimal-parser` (minimal test dataparser).
# NOTE(review): default embeds an absolute path from the generating machine.
_shtab_tyro_ns_train_water_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for `ns-train water nerfosr-data` (NeRF-OSR dataparser;
# mirrors the water-ex variant above).
_shtab_tyro_ns_train_water_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for the `nerfstudio-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab). Descriptions may span multiple
# physical lines inside a single quoted spec; zsh accepts embedded newlines there.
_shtab_tyro_ns_train_water_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for the `nuscenes-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab). Directory-valued flags use the
# `_files -/` action so completion offers directories only.
_shtab_tyro_ns_train_water_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for the `phototourism-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab).
_shtab_tyro_ns_train_water_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for the `scannet-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab). Default paths reference the
# nvsmask3d example scene baked in at generation time.
_shtab_tyro_ns_train_water_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for the `scannetpp-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab). `--images-dir` and `--masks-dir`
# complete directories only (`_files -/`); `--transforms-path` completes any file.
_shtab_tyro_ns_train_water_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for the `sdfstudio-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab).
_shtab_tyro_ns_train_water_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for the `sitcoms3d-data` dataparser of the `water` method
# (zsh `_arguments` format, generated by shtab).
_shtab_tyro_ns_train_water_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

_shtab_tyro_ns_train_water_splatting_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: water-splatting)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 1000)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 15001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-steps[Number of steps to train the model (default\: 15001)]:pipeline.model.num-steps:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.5)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-alpha-thresh-post[threshold of opacity for post culling gaussians (default\: 0.1)]:pipeline.model.cull-alpha-thresh-post:"
  "--pipeline.model.reset-alpha-thresh[threshold of opacity for resetting alpha (default\: 0.5)]:pipeline.model.reset-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 10.0)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.continue-cull-post-densification[If True, continue to cull gaussians post refinement (default\: True)]:pipeline.model.continue-cull-post-densification:(True False)"
  "--pipeline.model.zero-medium[If True, zero out the medium field (default\: False)]:pipeline.model.zero-medium:(True False)"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 5)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.abs-grad-densification[If True, use absolute gradient for densification (default\: True)]:pipeline.model.abs-grad-densification:(True False)"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (0.0004, 0.0008) (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.001)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.clip-thresh[minimum depth threshold (default\: 0.01)]:pipeline.model.clip-thresh:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 0)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.main-loss[main loss to use (default\: reg_l1)]:pipeline.model.main-loss:(l1 reg_l1 reg_l2)"
  "--pipeline.model.ssim-loss[ssim loss to use (default\: reg_ssim)]:pipeline.model.ssim-loss:(reg_ssim ssim)"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 10000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 128)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.mlp-type[Type of MLP to use for medium MLP. (default\: tcnn)]:pipeline.model.mlp-type:(tcnn torch)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-dc.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-dc.scheduler.lr-pre-warmup:"
  "--optimizers.features-dc.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.0025)]:optimizers.features-dc.scheduler.lr-final:"
  "--optimizers.features-dc.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-dc.scheduler.warmup-steps:"
  "--optimizers.features-dc.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-dc.scheduler.max-steps:"
  "--optimizers.features-dc.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-dc.scheduler.ramp:(linear cosine)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.features-rest.scheduler.lr-pre-warmup:"
  "--optimizers.features-rest.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.000125)]:optimizers.features-rest.scheduler.lr-final:"
  "--optimizers.features-rest.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.features-rest.scheduler.warmup-steps:"
  "--optimizers.features-rest.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.features-rest.scheduler.max-steps:"
  "--optimizers.features-rest.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.features-rest.scheduler.ramp:(linear cosine)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.opacities.scheduler.lr-pre-warmup:"
  "--optimizers.opacities.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.05)]:optimizers.opacities.scheduler.lr-final:"
  "--optimizers.opacities.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.opacities.scheduler.warmup-steps:"
  "--optimizers.opacities.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.opacities.scheduler.max-steps:"
  "--optimizers.opacities.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.opacities.scheduler.ramp:(linear cosine)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.scales.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.scales.scheduler.lr-pre-warmup:"
  "--optimizers.scales.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.005)]:optimizers.scales.scheduler.lr-final:"
  "--optimizers.scales.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.scales.scheduler.warmup-steps:"
  "--optimizers.scales.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.scales.scheduler.max-steps:"
  "--optimizers.scales.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.scales.scheduler.ramp:(linear cosine)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.quats.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.quats.scheduler.lr-pre-warmup:"
  "--optimizers.quats.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.001)]:optimizers.quats.scheduler.lr-final:"
  "--optimizers.quats.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.quats.scheduler.warmup-steps:"
  "--optimizers.quats.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.quats.scheduler.max-steps:"
  "--optimizers.quats.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.quats.scheduler.ramp:(linear cosine)"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-mlp.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.medium-mlp.optimizer.lr:"
  "--optimizers.medium-mlp.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-mlp.optimizer.eps:"
  "--optimizers.medium-mlp.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.medium-mlp.optimizer.max-norm:"
  "--optimizers.medium-mlp.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-mlp.optimizer.weight-decay:"
  "--optimizers.medium-mlp.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-mlp.scheduler.lr-pre-warmup:"
  "--optimizers.medium-mlp.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.00015)]:optimizers.medium-mlp.scheduler.lr-final:"
  "--optimizers.medium-mlp.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-mlp.scheduler.warmup-steps:"
  "--optimizers.medium-mlp.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-mlp.scheduler.max-steps:"
  "--optimizers.medium-mlp.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-mlp.scheduler.ramp:(linear cosine)"
  "--optimizers.direction-encoding.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.direction-encoding.optimizer.lr:"
  "--optimizers.direction-encoding.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.direction-encoding.optimizer.eps:"
  "--optimizers.direction-encoding.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.direction-encoding.optimizer.max-norm:"
  "--optimizers.direction-encoding.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.direction-encoding.optimizer.weight-decay:"
  "--optimizers.direction-encoding.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.direction-encoding.scheduler.lr-pre-warmup:"
  "--optimizers.direction-encoding.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.00015)]:optimizers.direction-encoding.scheduler.lr-final:"
  "--optimizers.direction-encoding.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.direction-encoding.scheduler.warmup-steps:"
  "--optimizers.direction-encoding.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.direction-encoding.scheduler.max-steps:"
  "--optimizers.direction-encoding.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.direction-encoding.scheduler.ramp:(linear cosine)"
)

# Completion specs for `ns-train water-splatting arkit-data` (ARKitScenes
# dataparser arguments). Each element is a zsh `_arguments`-style spec:
#   "--flag[description]:message:action"
# where an empty action means free-form input, `_files` completes paths, and
# `(a b)` offers a fixed choice list.
# NOTE(review): this file is AUTOMATICALLY GENERATED by shtab (see file
# header) — regenerate rather than hand-editing these strings.
_shtab_tyro_ns_train_water_splatting_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Completion specs for `ns-train water-splatting-big` (top-level trainer,
# machine, logging, viewer, pipeline, and optimizer options). Spec format:
#   "--flag[description]:message:action"
# Descriptions embed the tyro default; `\:`, `\/` etc. are escapes required
# inside zsh `_arguments` spec strings. Some elements legitimately span
# multiple physical lines because the help text contains newlines.
# NOTE(review): AUTOMATICALLY GENERATED by shtab (see file header) —
# regenerate instead of hand-editing.
_shtab_tyro_ns_train_water_splatting_big_options=(
  {-h,--help}"[show this help message and exit]:help:"
  # --- General trainer / experiment configuration ---
  "--output-dir[relative or absolute output directory to save all checkpoints and logging (default\: outputs)]:output-dir:_files -/"
  "--method-name[Method name. Required to set in python or via cli (default\: water-splatting-big)]:method-name:"
  "--experiment-name[Experiment name. If None, will automatically be set to dataset name (default\: None)]:experiment-name:"
  "--project-name[Project name. (default\: nerfstudio-project)]:project-name:"
  "--timestamp[Experiment timestamp. (default\: \'\{timestamp\}\')]:timestamp:"
  "--vis[Which visualizer to use. (default\: viewer)]:vis:(viewer wandb tensorboard comet viewer+wandb viewer+tensorboard viewer+comet viewer_legacy)"
  "--data[Alias for --pipeline.datamanager.data (default\: None)]:data:_files"
  "--prompt[Alias for --pipeline.model.prompt (default\: None)]:prompt:"
  "--relative-model-dir[Relative path to save all checkpoints. (default\: nerfstudio_models)]:relative-model-dir:_files -/"
  "--load-scheduler[Whether to load the scheduler state_dict to resume training, if it exists. (default\: True)]:load-scheduler:(True False)"
  "--steps-per-save[Number of steps between saves. (default\: 2000)]:steps-per-save:"
  "--steps-per-eval-batch[Number of steps between randomly sampled batches of rays. (default\: 0)]:steps-per-eval-batch:"
  "--steps-per-eval-image[Number of steps between single eval images. (default\: 100)]:steps-per-eval-image:"
  "--steps-per-eval-all-images[Number of steps between eval all images. (default\: 1000)]:steps-per-eval-all-images:"
  "--max-num-iterations[Maximum number of iterations to run. (default\: 15001)]:max-num-iterations:"
  "--mixed-precision[Whether or not to use mixed precision for training. (default\: False)]:mixed-precision:(True False)"
  "--use-grad-scaler[Use gradient scaler even if the automatic mixed precision is disabled. (default\: False)]:use-grad-scaler:(True False)"
  "--save-only-latest-checkpoint[Whether to only save the latest checkpoint or all checkpoints. (default\: True)]:save-only-latest-checkpoint:(True False)"
  "--load-dir[Optionally specify a pre-trained model directory to load from. (default\: None)]:load-dir:_files -/"
  "--load-step[Optionally specify model step to load from\; if none, will find most recent model in load_dir. (default\: None)]:load-step:"
  "--load-config[Path to config YAML file. (default\: None)]:load-config:_files"
  "--load-checkpoint[Path to checkpoint file. (default\: None)]:load-checkpoint:_files"
  "--log-gradients[Optionally log gradients during training (default\: False)]:log-gradients:(True False)"
  "--gradient-accumulation-steps[Number of steps to accumulate gradients over. Contains a mapping of \{param_group\:num\} (default\: )]:gradient-accumulation-steps:"
  "--start-paused[Whether to start the training in a paused state. (default\: False)]:start-paused:(True False)"
  # --- machine.* : seed / distributed (DDP) setup ---
  "--machine.seed[random seed initialization (default\: 42)]:machine.seed:"
  "--machine.num-devices[total number of devices (e.g., gpus) available for train\/eval (default\: 1)]:machine.num-devices:"
  "--machine.num-machines[total number of distributed machines available (for DDP) (default\: 1)]:machine.num-machines:"
  "--machine.machine-rank[current machine\'s rank (for DDP) (default\: 0)]:machine.machine-rank:"
  "--machine.dist-url[distributed connection point (for DDP) (default\: auto)]:machine.dist-url:"
  "--machine.device-type[device type to use for training (default\: cuda)]:machine.device-type:(cpu cuda mps)"
  # --- logging.* : stats logging and profiling ---
  "--logging.relative-log-dir[relative path to save all logged events (default\: .)]:logging.relative-log-dir:_files -/"
  "--logging.steps-per-log[number of steps between logging stats (default\: 10)]:logging.steps-per-log:"
  "--logging.max-buffer-size[maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurrences. (default\: 20)]:logging.max-buffer-size:"
  "--logging.profiler[how to profile the code\;
\"basic\" - prints speed of all decorated functions at the end of a program.
\"pytorch\" - same as basic, but it also traces few training steps. (default\: basic)]:logging.profiler:(none basic pytorch)"
  "--logging.local-writer.enable[if True enables local logging, else disables (default\: True)]:logging.local-writer.enable:(True False)"
  "--logging.local-writer.stats-to-track[specifies which stats will be logged\/printed to terminal (default\: ITER_TRAIN_TIME TRAIN_RAYS_PER_SEC CURR_TEST_PSNR VIS_RAYS_PER_SEC TEST_RAYS_PER_SEC ETA)]:logging.local-writer.stats-to-track:(ITER_TRAIN_TIME TOTAL_TRAIN_TIME ETA TRAIN_RAYS_PER_SEC TEST_RAYS_PER_SEC VIS_RAYS_PER_SEC CURR_TEST_PSNR)"
  "--logging.local-writer.max-log-size[maximum number of rows to print before wrapping. if 0, will print everything. (default\: 10)]:logging.local-writer.max-log-size:"
  # --- viewer.* : web viewer settings ---
  "--viewer.relative-log-filename[Filename to use for the log file. (default\: viewer_log_filename.txt)]:viewer.relative-log-filename:_files"
  "--viewer.websocket-port[The websocket port to connect to. If None, find an available port. (default\: None)]:viewer.websocket-port:"
  "--viewer.websocket-port-default[The default websocket port to connect to if websocket_port is not specified (default\: 7007)]:viewer.websocket-port-default:"
  "--viewer.websocket-host[The host address to bind the websocket server to. (default\: 0.0.0.0)]:viewer.websocket-host:"
  "--viewer.num-rays-per-chunk[number of rays per chunk to render with viewer (default\: 32768)]:viewer.num-rays-per-chunk:"
  "--viewer.max-num-display-images[Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training\/evaluation. If -1, display all. (default\: 512)]:viewer.max-num-display-images:"
  "--viewer.quit-on-train-completion[Whether to kill the training job when it has completed. Note this will stop rendering in the viewer. (default\: False)]:viewer.quit-on-train-completion:(True False)"
  "--viewer.image-format[Image format viewer should use\; jpeg is lossy compression, while png is lossless. (default\: jpeg)]:viewer.image-format:(jpeg png)"
  "--viewer.jpeg-quality[Quality tradeoff to use for jpeg compression. (default\: 75)]:viewer.jpeg-quality:"
  "--viewer.make-share-url[Viewer beta feature\: print a shareable URL. This flag is ignored in the legacy version of the viewer. (default\: False)]:viewer.make-share-url:(True False)"
  "--viewer.camera-frustum-scale[Scale for the camera frustums in the viewer. (default\: 0.1)]:viewer.camera-frustum-scale:"
  "--viewer.default-composite-depth[The default value for compositing depth. Turn off if you want to see the camera frustums without occlusions. (default\: True)]:viewer.default-composite-depth:(True False)"
  # --- pipeline.datamanager.* : data loading / sampling ---
  "--pipeline.datamanager.data[Source of data, may not be used by all models. (default\: None)]:pipeline.datamanager.data:_files"
  "--pipeline.datamanager.masks-on-gpu[Process masks on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.masks-on-gpu:(True False)"
  "--pipeline.datamanager.images-on-gpu[Process images on GPU for speed at the expense of memory, if True. (default\: False)]:pipeline.datamanager.images-on-gpu:(True False)"
  "--pipeline.datamanager.camera-res-scale-factor[The scale factor for scaling spatial data such as images, mask, semantics
along with relevant information about camera intrinsics (default\: 1.0)]:pipeline.datamanager.camera-res-scale-factor:"
  "--pipeline.datamanager.eval-num-images-to-sample-from[Number of images to sample during eval iteration. (default\: -1)]:pipeline.datamanager.eval-num-images-to-sample-from:"
  "--pipeline.datamanager.eval-num-times-to-repeat-images[When not evaluating on all images, number of iterations before picking
new images. If -1, never pick new images. (default\: -1)]:pipeline.datamanager.eval-num-times-to-repeat-images:"
  "--pipeline.datamanager.eval-image-indices[Specifies the image indices to use during eval\; if None, uses all. (default\: 0)]:pipeline.datamanager.eval-image-indices:"
  "--pipeline.datamanager.cache-images[Whether to cache images in memory. If \"cpu\", caches on cpu. If \"gpu\", caches on device. (default\: gpu)]:pipeline.datamanager.cache-images:(cpu gpu)"
  "--pipeline.datamanager.cache-images-type[The image type returned from manager, caching images in uint8 saves memory (default\: uint8)]:pipeline.datamanager.cache-images-type:(uint8 float32)"
  "--pipeline.datamanager.max-thread-workers[The maximum number of threads to use for caching images. If None, uses all available threads. (default\: None)]:pipeline.datamanager.max-thread-workers:"
  "--pipeline.datamanager.train-cameras-sampling-strategy[Specifies which sampling strategy is used to generate train cameras, \'random\' means sampling
uniformly random without replacement, \'fps\' means farthest point sampling which is helpful to reduce the artifacts
due to oversampling subsets of cameras that are very close to each other. (default\: random)]:pipeline.datamanager.train-cameras-sampling-strategy:(random fps)"
  "--pipeline.datamanager.train-cameras-sampling-seed[Random seed for sampling train cameras. Fixing seed may help reduce variance of trained models across
different runs. (default\: 42)]:pipeline.datamanager.train-cameras-sampling-seed:"
  "--pipeline.datamanager.fps-reset-every[The number of iterations before one resets fps sampler repeatly, which is essentially drawing fps_reset_every
samples from the pool of all training cameras without replacement before a new round of sampling starts. (default\: 100)]:pipeline.datamanager.fps-reset-every:"
  # --- pipeline.model.* : splatting model hyperparameters ---
  "--pipeline.model.enable-collider[Whether to create a scene collider to filter rays. (default\: True)]:pipeline.model.enable-collider:(True False)"
  "--pipeline.model.collider-params[parameters to instantiate scene collider with (default\: near_plane 2.0 far_plane 6.0)]:pipeline.model.collider-params:"
  "--pipeline.model.eval-num-rays-per-chunk[specifies number of rays per chunk during eval (default\: 4096)]:pipeline.model.eval-num-rays-per-chunk:"
  "--pipeline.model.prompt[A prompt to be used in text to NeRF models (default\: None)]:pipeline.model.prompt:"
  "--pipeline.model.num-steps[Number of steps to train the model (default\: 15001)]:pipeline.model.num-steps:"
  "--pipeline.model.warmup-length[period of steps where refinement is turned off (default\: 500)]:pipeline.model.warmup-length:"
  "--pipeline.model.refine-every[period of steps where gaussians are culled and densified (default\: 100)]:pipeline.model.refine-every:"
  "--pipeline.model.resolution-schedule[training starts at 1\/d resolution, every n steps this is doubled (default\: 3000)]:pipeline.model.resolution-schedule:"
  "--pipeline.model.background-color[Whether to randomize the background color. (default\: black)]:pipeline.model.background-color:(random black white)"
  "--pipeline.model.num-downscales[at the beginning, resolution is 1\/2\^d, where d is this number (default\: 2)]:pipeline.model.num-downscales:"
  "--pipeline.model.cull-alpha-thresh[threshold of opacity for culling gaussians. One can set it to a lower value (e.g. 0.005) for higher quality. (default\: 0.5)]:pipeline.model.cull-alpha-thresh:"
  "--pipeline.model.cull-alpha-thresh-post[threshold of opacity for post culling gaussians (default\: 0.1)]:pipeline.model.cull-alpha-thresh-post:"
  "--pipeline.model.reset-alpha-thresh[threshold of opacity for resetting alpha (default\: 0.5)]:pipeline.model.reset-alpha-thresh:"
  "--pipeline.model.cull-scale-thresh[threshold of scale for culling huge gaussians (default\: 10.0)]:pipeline.model.cull-scale-thresh:"
  "--pipeline.model.continue-cull-post-densification[If True, continue to cull gaussians post refinement (default\: False)]:pipeline.model.continue-cull-post-densification:(True False)"
  "--pipeline.model.zero-medium[If True, zero out the medium field (default\: False)]:pipeline.model.zero-medium:(True False)"
  "--pipeline.model.reset-alpha-every[Every this many refinement steps, reset the alpha (default\: 5)]:pipeline.model.reset-alpha-every:"
  "--pipeline.model.abs-grad-densification[If True, use absolute gradient for densification (default\: True)]:pipeline.model.abs-grad-densification:(True False)"
  "--pipeline.model.densify-grad-thresh[threshold of positional gradient norm for densifying gaussians (0.0004, 0.0008) (default\: 0.0008)]:pipeline.model.densify-grad-thresh:"
  "--pipeline.model.densify-size-thresh[below this size, gaussians are \*duplicated\*, otherwise split (default\: 0.001)]:pipeline.model.densify-size-thresh:"
  "--pipeline.model.n-split-samples[number of samples to split gaussians into (default\: 2)]:pipeline.model.n-split-samples:"
  "--pipeline.model.sh-degree-interval[every n intervals turn on another sh degree (default\: 1000)]:pipeline.model.sh-degree-interval:"
  "--pipeline.model.clip-thresh[minimum depth threshold (default\: 0.01)]:pipeline.model.clip-thresh:"
  "--pipeline.model.cull-screen-size[if a gaussian is more than this percent of screen space, cull it (default\: 0.15)]:pipeline.model.cull-screen-size:"
  "--pipeline.model.split-screen-size[if a gaussian is more than this percent of screen space, split it (default\: 0.05)]:pipeline.model.split-screen-size:"
  "--pipeline.model.stop-screen-size-at[stop culling\/splitting at this step WRT screen size of gaussians (default\: 0)]:pipeline.model.stop-screen-size-at:"
  "--pipeline.model.random-init[whether to initialize the positions uniformly randomly (not SFM points) (default\: False)]:pipeline.model.random-init:(True False)"
  "--pipeline.model.num-random[Number of gaussians to initialize if random init is used (default\: 50000)]:pipeline.model.num-random:"
  "--pipeline.model.random-scale[Size of the cube to initialize random gaussians within (default\: 10.0)]:pipeline.model.random-scale:"
  "--pipeline.model.ssim-lambda[weight of ssim loss (default\: 0.2)]:pipeline.model.ssim-lambda:"
  "--pipeline.model.main-loss[main loss to use (default\: reg_l1)]:pipeline.model.main-loss:(l1 reg_l1 reg_l2)"
  "--pipeline.model.ssim-loss[ssim loss to use (default\: reg_ssim)]:pipeline.model.ssim-loss:(reg_ssim ssim)"
  "--pipeline.model.stop-split-at[stop splitting at this step (default\: 10000)]:pipeline.model.stop-split-at:"
  "--pipeline.model.sh-degree[maximum degree of spherical harmonics to use (default\: 3)]:pipeline.model.sh-degree:"
  "--pipeline.model.rasterize-mode[Classic mode of rendering will use the EWA volume splatting with a \[0.3, 0.3\] screen space blurring kernel. This
approach is however not suitable to render tiny gaussians at higher or lower resolution than the captured, which
results \"aliasing-like\" artifacts. The antialiased mode overcomes this limitation by calculating compensation factors
and apply them to the opacities of gaussians to preserve the total integrated density of splats.


However, PLY exported with antialiased rasterize mode is not compatible with classic mode. Thus many web viewers that
were implemented for classic mode can not render antialiased mode PLY properly without modifications. (default\: classic)]:pipeline.model.rasterize-mode:(classic antialiased)"
  "--pipeline.model.num-layers-medium[Number of hidden layers for medium MLP. (default\: 2)]:pipeline.model.num-layers-medium:"
  "--pipeline.model.hidden-dim-medium[Dimension of hidden layers for medium MLP. (default\: 128)]:pipeline.model.hidden-dim-medium:"
  "--pipeline.model.medium-density-bias[Bias for medium density (sigma_bs and sigma_attn). (default\: 0.0)]:pipeline.model.medium-density-bias:"
  "--pipeline.model.mlp-type[Type of MLP to use for medium MLP. (default\: tcnn)]:pipeline.model.mlp-type:(tcnn torch)"
  "--pipeline.model.loss-coefficients.rgb-loss-coarse[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-coarse:"
  "--pipeline.model.loss-coefficients.rgb-loss-fine[(default\: 1.0)]:pipeline.model.loss-coefficients.rgb-loss-fine:"
  # --- optimizers.* : per-parameter-group optimizer/scheduler options ---
  "--optimizers.means.optimizer.lr[The learning rate to use. (default\: 0.00016)]:optimizers.means.optimizer.lr:"
  "--optimizers.means.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.means.optimizer.eps:"
  "--optimizers.means.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.means.optimizer.max-norm:"
  "--optimizers.means.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.means.optimizer.weight-decay:"
  "--optimizers.means.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.means.scheduler.lr-pre-warmup:"
  "--optimizers.means.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.means.scheduler.lr-final:"
  "--optimizers.means.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.means.scheduler.warmup-steps:"
  "--optimizers.means.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.means.scheduler.max-steps:"
  "--optimizers.means.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.means.scheduler.ramp:(linear cosine)"
  "--optimizers.features-dc.scheduler[(default\: None)]:optimizers.features-dc.scheduler:(None)"
  "--optimizers.features-dc.optimizer.lr[The learning rate to use. (default\: 0.0025)]:optimizers.features-dc.optimizer.lr:"
  "--optimizers.features-dc.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-dc.optimizer.eps:"
  "--optimizers.features-dc.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-dc.optimizer.max-norm:"
  "--optimizers.features-dc.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-dc.optimizer.weight-decay:"
  "--optimizers.features-rest.scheduler[(default\: None)]:optimizers.features-rest.scheduler:(None)"
  "--optimizers.features-rest.optimizer.lr[The learning rate to use. (default\: 0.000125)]:optimizers.features-rest.optimizer.lr:"
  "--optimizers.features-rest.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.features-rest.optimizer.eps:"
  "--optimizers.features-rest.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.features-rest.optimizer.max-norm:"
  "--optimizers.features-rest.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.features-rest.optimizer.weight-decay:"
  "--optimizers.opacities.scheduler[(default\: None)]:optimizers.opacities.scheduler:(None)"
  "--optimizers.opacities.optimizer.lr[The learning rate to use. (default\: 0.05)]:optimizers.opacities.optimizer.lr:"
  "--optimizers.opacities.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.opacities.optimizer.eps:"
  "--optimizers.opacities.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.opacities.optimizer.max-norm:"
  "--optimizers.opacities.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.opacities.optimizer.weight-decay:"
  "--optimizers.scales.scheduler[(default\: None)]:optimizers.scales.scheduler:(None)"
  "--optimizers.scales.optimizer.lr[The learning rate to use. (default\: 0.005)]:optimizers.scales.optimizer.lr:"
  "--optimizers.scales.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.scales.optimizer.eps:"
  "--optimizers.scales.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.scales.optimizer.max-norm:"
  "--optimizers.scales.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.scales.optimizer.weight-decay:"
  "--optimizers.quats.scheduler[(default\: None)]:optimizers.quats.scheduler:(None)"
  "--optimizers.quats.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.quats.optimizer.lr:"
  "--optimizers.quats.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.quats.optimizer.eps:"
  "--optimizers.quats.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.quats.optimizer.max-norm:"
  "--optimizers.quats.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.quats.optimizer.weight-decay:"
  "--optimizers.camera-opt.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.camera-opt.optimizer.lr:"
  "--optimizers.camera-opt.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.camera-opt.optimizer.eps:"
  "--optimizers.camera-opt.optimizer.max-norm[The max norm to use for gradient clipping. (default\: None)]:optimizers.camera-opt.optimizer.max-norm:"
  "--optimizers.camera-opt.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.camera-opt.optimizer.weight-decay:"
  "--optimizers.camera-opt.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.camera-opt.scheduler.lr-pre-warmup:"
  "--optimizers.camera-opt.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 5e-05)]:optimizers.camera-opt.scheduler.lr-final:"
  "--optimizers.camera-opt.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.camera-opt.scheduler.warmup-steps:"
  "--optimizers.camera-opt.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.camera-opt.scheduler.max-steps:"
  "--optimizers.camera-opt.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.camera-opt.scheduler.ramp:(linear cosine)"
  "--optimizers.medium-mlp.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.medium-mlp.optimizer.lr:"
  "--optimizers.medium-mlp.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.medium-mlp.optimizer.eps:"
  "--optimizers.medium-mlp.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.medium-mlp.optimizer.max-norm:"
  "--optimizers.medium-mlp.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.medium-mlp.optimizer.weight-decay:"
  "--optimizers.medium-mlp.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.medium-mlp.scheduler.lr-pre-warmup:"
  "--optimizers.medium-mlp.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.00015)]:optimizers.medium-mlp.scheduler.lr-final:"
  "--optimizers.medium-mlp.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.medium-mlp.scheduler.warmup-steps:"
  "--optimizers.medium-mlp.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.medium-mlp.scheduler.max-steps:"
  "--optimizers.medium-mlp.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.medium-mlp.scheduler.ramp:(linear cosine)"
  "--optimizers.direction-encoding.optimizer.lr[The learning rate to use. (default\: 0.001)]:optimizers.direction-encoding.optimizer.lr:"
  "--optimizers.direction-encoding.optimizer.eps[The epsilon value to use. (default\: 1e-15)]:optimizers.direction-encoding.optimizer.eps:"
  "--optimizers.direction-encoding.optimizer.max-norm[The max norm to use for gradient clipping. (default\: 0.001)]:optimizers.direction-encoding.optimizer.max-norm:"
  "--optimizers.direction-encoding.optimizer.weight-decay[The weight decay to use. (default\: 0)]:optimizers.direction-encoding.optimizer.weight-decay:"
  "--optimizers.direction-encoding.scheduler.lr-pre-warmup[Learning rate before warmup. (default\: 1e-08)]:optimizers.direction-encoding.scheduler.lr-pre-warmup:"
  "--optimizers.direction-encoding.scheduler.lr-final[Final learning rate. If not provided, it will be set to the optimizers learning rate. (default\: 0.00015)]:optimizers.direction-encoding.scheduler.lr-final:"
  "--optimizers.direction-encoding.scheduler.warmup-steps[Number of warmup steps. (default\: 0)]:optimizers.direction-encoding.scheduler.warmup-steps:"
  "--optimizers.direction-encoding.scheduler.max-steps[The maximum number of steps. (default\: 15001)]:optimizers.direction-encoding.scheduler.max-steps:"
  "--optimizers.direction-encoding.scheduler.ramp[The ramp function to use during the warmup. (default\: cosine)]:optimizers.direction-encoding.scheduler.ramp:(linear cosine)"
)

# zsh _arguments option specs for the ARKitScenes dataparser of
# `ns-train water-splatting-big`. Auto-generated by shtab — regenerate
# from the upstream Python config instead of editing by hand.
_shtab_tyro_ns_train_water_splatting_big_arkit_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ARKitScenes folder with densely extracted scenes. (default\: data\/ARKitScenes\/3dod\/Validation\/41069021)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
)

# Option specs for the Blender synthetic dataparser of
# `ns-train water-splatting-big` (shtab-generated; do not hand-edit).
_shtab_tyro_ns_train_water_splatting_big_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for the COLMAP dataparser of `ns-train water-splatting-big`
# (shtab-generated). Multi-line entries below are single quoted strings whose
# descriptions span several physical lines; keep them intact.
_shtab_tyro_ns_train_water_splatting_big_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for the D-NeRF dataparser of `ns-train water-splatting-big`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for the DyCheck (iPhone) dataparser of
# `ns-train water-splatting-big` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for the Instant-NGP dataparser of
# `ns-train water-splatting-big` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for the minimal test dataparser of
# `ns-train water-splatting-big`. NOTE(review): the default path below bakes in
# a developer's home directory — an upstream (generator-input) quirk, not a
# completion-script bug.
_shtab_tyro_ns_train_water_splatting_big_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# Option specs for the NeRF-OSR dataparser of `ns-train water-splatting-big`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for the nerfstudio-format dataparser of
# `ns-train water-splatting-big` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# Option specs for the NuScenes dataparser of `ns-train water-splatting-big`
# (shtab-generated). `_files -/` completes directories only.
_shtab_tyro_ns_train_water_splatting_big_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# Option specs for the Phototourism dataparser of
# `ns-train water-splatting-big` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# Option specs for the ScanNet dataparser of `ns-train water-splatting-big`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# Option specs for the ScanNet++ dataparser of `ns-train water-splatting-big`
# (shtab-generated). `_files -/` completes directories only.
_shtab_tyro_ns_train_water_splatting_big_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# Option specs for the SDFStudio dataparser of `ns-train water-splatting-big`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# Option specs for the Sitcoms3D dataparser of `ns-train water-splatting-big`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_big_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# Option specs for the water dataparser of `ns-train water-splatting-big`
# (shtab-generated). Mirrors the COLMAP dataparser options plus semantics
# (`--include-semantics`, `--label-path`).
_shtab_tyro_ns_train_water_splatting_big_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# Option specs for the Blender synthetic dataparser of
# `ns-train water-splatting` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_blender_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/blender\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background, when set to None, InputDataset that consumes DataparserOutputs will not attempt
to blend with alpha_colors using image\'s alpha channel data. Thus rgba image will be directly used in training. (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--ply-path[Path to PLY file to load 3D points from, defined relative to the dataset directory. This is helpful for
Gaussian splatting and generally unused otherwise. If \`None\`, points are initialized randomly. (default\: None)]:pipeline.datamanager.dataparser.ply-path:_files"
)

# Option specs for the COLMAP dataparser of `ns-train water-splatting`
# (shtab-generated; identical option set to the water-splatting-big variant).
_shtab_tyro_ns_train_water_splatting_colmap_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
)

# Option specs for the D-NeRF dataparser of `ns-train water-splatting`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_dnerf_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/dnerf\/lego)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
)

# Option specs for the DyCheck (iPhone) dataparser of
# `ns-train water-splatting` (shtab-generated).
_shtab_tyro_ns_train_water_splatting_dycheck_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/iphone\/mochi-high-five)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 5.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--downscale-factor[How much to downscale images. (default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-box-bound[Boundary of scene box. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-box-bound:"
)

# Option specs for the Instant-NGP dataparser of `ns-train water-splatting`
# (shtab-generated).
_shtab_tyro_ns_train_water_splatting_instant_ngp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: data\/ours\/posterv2)]:pipeline.datamanager.dataparser.data:_files"
  "--scene-scale[How much to scale the scene. (default\: 0.3333)]:pipeline.datamanager.dataparser.scene-scale:"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
)

# Option specs for the minimal test dataparser of `ns-train water-splatting`.
# NOTE(review): the hard-coded /home/nikhil default comes from the generator's
# input config, not from this script.
_shtab_tyro_ns_train_water_splatting_minimal_parser_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: \/home\/nikhil\/nerfstudio-main\/tests\/data\/lego_test\/minimal_parser)]:pipeline.datamanager.dataparser.data:_files"
)

# _arguments spec for `ns-train water-splatting nerfosr-data`:
# NeRF-OSR dataparser flags (scene selection, scaling, masks, pose handling).
_shtab_tyro_ns_train_water_splatting_nerfosr_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/NeRF-OSR\/Data)]:pipeline.datamanager.dataparser.data:_files"
  "--scene[Which scene to load (default\: stjacob)]:pipeline.datamanager.dataparser.scene:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--use-masks[Whether to use masks. (default\: False)]:pipeline.datamanager.dataparser.use-masks:(True False)"
  "--orientation-method[The method to use for orientation. (default\: vertical)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use for centering. (default\: focus)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments spec for `ns-train water-splatting nerfstudio-data`:
# standard Nerfstudio dataparser flags (scaling, pose orientation/centering,
# eval split, depth scaling, 3D point loading).
_shtab_tyro_ns_train_water_splatting_nerfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval.
All uses all the images for any split. (default\: fraction)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The percentage of the dataset to use for training. Only used when eval_mode is train-split-fraction. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--mask-color[Replace the unknown pixels with this color. Relevant if you have a mask but still sample everywhere. (default\: None)]:pipeline.datamanager.dataparser.mask-color:"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
)

# _arguments spec for `ns-train water-splatting nuscenes-data`:
# NuScenes dataparser flags (scene/version/camera selection, masks, split).
# Note: --data-dir and --mask-dir complete directories only (`_files -/`).
_shtab_tyro_ns_train_water_splatting_nuscenes_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Name of the scene. (default\: scene-0103)]:pipeline.datamanager.dataparser.data:_files"
  "--data-dir[Path to NuScenes dataset. (default\: \/mnt\/local\/NuScenes)]:pipeline.datamanager.dataparser.data-dir:_files -/"
  "--version[Dataset version. (default\: v1.0-mini)]:pipeline.datamanager.dataparser.version:(v1.0-mini v1.0-trainval)"
  "--cameras[Which cameras to use. (default\: FRONT)]:pipeline.datamanager.dataparser.cameras:(FRONT FRONT_LEFT FRONT_RIGHT BACK BACK_LEFT BACK_RIGHT)"
  "--mask-dir[Path to masks of dynamic objects. (default\: None)]:pipeline.datamanager.dataparser.mask-dir:_files -/"
  "--train-split-fraction[The percent of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--verbose[Load dataset with verbose messaging (default\: False)]:pipeline.datamanager.dataparser.verbose:(True False)"
)

# _arguments spec for `ns-train water-splatting phototourism-data`:
# Phototourism dataparser flags (scaling, alpha color, split, pose handling).
_shtab_tyro_ns_train_water_splatting_phototourism_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/phototourism\/brandenburg-gate)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 3.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--alpha-color[alpha color of background (default\: white)]:pipeline.datamanager.dataparser.alpha-color:"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
)

# _arguments spec for `ns-train water-splatting scannet-data`:
# ScanNet dataparser flags (scaling, split, depth scaling, .ply point cloud).
_shtab_tyro_ns_train_water_splatting_scannet_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Path to ScanNet folder with densely extracted scenes. (default\: nvsmask3d\/data\/scene_example)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--load-3D-points[Whether to load the 3D points from the .ply (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--point-cloud-color[read point cloud colors from .ply files or not (default\: True)]:pipeline.datamanager.dataparser.point-cloud-color:(True False)"
  "--ply-file-path[path to the .ply file containing the 3D points (default\: nvsmask3d\/data\/scene_example\/scene_example.ply)]:pipeline.datamanager.dataparser.ply-file-path:_files"
)

# _arguments spec for `ns-train water-splatting scannetpp-data`:
# ScanNet++ dataparser flags (scaling, pose handling, DSLR image/mask dirs).
_shtab_tyro_ns_train_water_splatting_scannetpp_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory to the root of the data. (default\: scannetpp\/410c470782)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--scene-scale[How much to scale the region of interest by. Default is 1.5 since the cameras are inside the rooms. (default\: 1.5)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--images-dir[Relative path to the images directory (default\: resized_images) (default\: dslr\/resized_images)]:pipeline.datamanager.dataparser.images-dir:_files -/"
  "--masks-dir[Relative path to the masks directory (default\: resized_anon_masks) (default\: dslr\/resized_anon_masks)]:pipeline.datamanager.dataparser.masks-dir:_files -/"
  "--transforms-path[Relative path to the transforms.json file (default\: dslr\/nerfstudio\/transforms.json)]:pipeline.datamanager.dataparser.transforms-path:_files"
)

# _arguments spec for `ns-train water-splatting sdfstudio-data`:
# SDFStudio dataparser flags (monocular priors, masks, scaling, val split).
_shtab_tyro_ns_train_water_splatting_sdfstudio_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/DTU\/scan65)]:pipeline.datamanager.dataparser.data:_files"
  "--include-mono-prior[whether or not to load monocular depth and normal (default\: False)]:pipeline.datamanager.dataparser.include-mono-prior:(True False)"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--include-foreground-mask[whether or not to load foreground mask (default\: False)]:pipeline.datamanager.dataparser.include-foreground-mask:(True False)"
  "--downscale-factor[(default\: 1)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--skip-every-for-val-split[sub sampling validation images (default\: 1)]:pipeline.datamanager.dataparser.skip-every-for-val-split:"
  "--auto-orient[(default\: True)]:pipeline.datamanager.dataparser.auto-orient:(True False)"
)

# _arguments spec for `ns-train water-splatting sitcoms3d-data`:
# Sitcoms3D dataparser flags (semantics, downscale, scene scale).
_shtab_tyro_ns_train_water_splatting_sitcoms3d_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory specifying location of data. (default\: data\/sitcoms3d\/TBBT-big_living_room)]:pipeline.datamanager.dataparser.data:_files"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--downscale-factor[(default\: 4)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--scene-scale[Sets the bounding cube to have edge length of this size.
The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value. (default\: 2.0)]:pipeline.datamanager.dataparser.scene-scale:"
)

# _arguments spec for `ns-train water-splatting water-data`:
# COLMAP-style water dataparser flags (scaling, pose handling, eval split,
# image/mask/depth/semantic paths, 3D point loading).
_shtab_tyro_ns_train_water_splatting_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments spec for `ns-train water water-data`: identical flag set to the
# water-splatting variant above, generated once per (method, dataparser) pair.
_shtab_tyro_ns_train_water_water_data_options=(
  {-h,--help}"[show this help message and exit]:help:"
  "--data[Directory or explicit json file path specifying location of data. (default\: .)]:pipeline.datamanager.dataparser.data:_files"
  "--scale-factor[How much to scale the camera origins by. (default\: 1.0)]:pipeline.datamanager.dataparser.scale-factor:"
  "--downscale-factor[How much to downscale images. If not set, images are chosen such that the max dimension is \<1600px. (default\: None)]:pipeline.datamanager.dataparser.downscale-factor:"
  "--downscale-rounding-mode[How to round downscale image height and Image width. (default\: floor)]:pipeline.datamanager.dataparser.downscale-rounding-mode:(floor round ceil)"
  "--scene-scale[How much to scale the region of interest by. (default\: 1.0)]:pipeline.datamanager.dataparser.scene-scale:"
  "--orientation-method[The method to use for orientation. (default\: up)]:pipeline.datamanager.dataparser.orientation-method:(pca up vertical none)"
  "--center-method[The method to use to center the poses. (default\: poses)]:pipeline.datamanager.dataparser.center-method:(poses focus none)"
  "--auto-scale-poses[Whether to automatically scale the poses to fit in \+\/- 1 bounding box. (default\: True)]:pipeline.datamanager.dataparser.auto-scale-poses:(True False)"
  "--assume-colmap-world-coordinate-convention[Colmap optimized world often have y direction of the first camera pointing towards down direction,
while nerfstudio world set z direction to be up direction for viewer. Therefore, we usually need to apply an extra
transform when orientation_method\=none. This parameter has no effects if orientation_method is set other than none.
When this parameter is set to False, no extra transform is applied when reading data from colmap. (default\: True)]:pipeline.datamanager.dataparser.assume-colmap-world-coordinate-convention:(True False)"
  "--eval-mode[The method to use for splitting the dataset into train and eval.
Fraction splits based on a percentage for train and the remaining for eval.
Filename splits based on filenames containing train\/eval.
Interval uses every nth frame for eval (used by most academic papers, e.g. MipNerf360, GSplat).
All uses all the images for any split. (default\: interval)]:pipeline.datamanager.dataparser.eval-mode:(fraction filename interval all)"
  "--train-split-fraction[The fraction of images to use for training. The remaining images are for eval. (default\: 0.9)]:pipeline.datamanager.dataparser.train-split-fraction:"
  "--eval-interval[The interval between frames to use for eval. Only used when eval_mode is eval-interval. (default\: 8)]:pipeline.datamanager.dataparser.eval-interval:"
  "--depth-unit-scale-factor[Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion. (default\: 0.001)]:pipeline.datamanager.dataparser.depth-unit-scale-factor:"
  "--images-path[Path to images directory relative to the data path. (default\: images)]:pipeline.datamanager.dataparser.images-path:_files"
  "--masks-path[Path to masks directory. If not set, masks are not loaded. (default\: None)]:pipeline.datamanager.dataparser.masks-path:_files"
  "--depths-path[Path to depth maps directory. If not set, depths are not loaded. (default\: None)]:pipeline.datamanager.dataparser.depths-path:_files"
  "--colmap-path[Path to the colmap reconstruction directory relative to the data path. (default\: colmap\/sparse\/0)]:pipeline.datamanager.dataparser.colmap-path:_files"
  "--load-3D-points[Whether to load the 3D points from the colmap reconstruction. This is helpful for Gaussian splatting and
generally unused otherwise, but it\'s typically harmless so we default to True. (default\: True)]:pipeline.datamanager.dataparser.load-3D-points:(True False)"
  "--max-2D-matches-per-3D-point[Maximum number of 2D matches per 3D point. If set to -1, all 2D matches are loaded. If set to 0, no 2D matches are loaded. (default\: 0)]:pipeline.datamanager.dataparser.max-2D-matches-per-3D-point:"
  "--include-semantics[whether or not to include loading of semantics data (default\: True)]:pipeline.datamanager.dataparser.include-semantics:(True False)"
  "--label-path[Path to semantic labels directory relative to the data path. (default\: label)]:pipeline.datamanager.dataparser.label-path:_files"
)

# _arguments spec for `ns-train zipnerf` (external method): only -h/--help is
# offered; the method must be installed before its full CLI is available.
_shtab_tyro_ns_train_zipnerf_options=(
  {-h,--help}"[show this help message and exit]:help:"
)


# Top-level compsys entry point for `ns-train` (generated by shtab).
# Completes the method name first, then dispatches to the per-method
# completion: a helper function for methods that have their own dataparser
# subcommands, or a direct _arguments call on the method's options array.
_shtab_tyro_ns_train() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/state specs exactly once: skip when the options
  # array already contains a one-or-more ('(-)*') or remainder ('(*)') spec.
  # (q) quotes pattern characters so (I) does a literal reverse-index lookup.
  if ((${_shtab_tyro_ns_train_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_options+=(': :_shtab_tyro_ns_train_commands' '*::: :->ns-train')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_options

  case $state in
    ns-train)
      # Re-anchor parsing on the method word: make it word 1, advance the
      # cursor, and tag the completion context with the method name.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train-$line[1]:"
      case $line[1] in
        BioNeRF) _arguments -C -s $_shtab_tyro_ns_train_BioNeRF_options ;;
        depth-nerfacto) _shtab_tyro_ns_train_depth_nerfacto ;;
        dnerf) _shtab_tyro_ns_train_dnerf ;;
        generfacto) _arguments -C -s $_shtab_tyro_ns_train_generfacto_options ;;
        igs2gs) _arguments -C -s $_shtab_tyro_ns_train_igs2gs_options ;;
        in2n) _arguments -C -s $_shtab_tyro_ns_train_in2n_options ;;
        in2n-small) _arguments -C -s $_shtab_tyro_ns_train_in2n_small_options ;;
        in2n-tiny) _arguments -C -s $_shtab_tyro_ns_train_in2n_tiny_options ;;
        instant-ngp) _shtab_tyro_ns_train_instant_ngp ;;
        instant-ngp-bounded) _shtab_tyro_ns_train_instant_ngp_bounded ;;
        kplanes) _arguments -C -s $_shtab_tyro_ns_train_kplanes_options ;;
        kplanes-dynamic) _arguments -C -s $_shtab_tyro_ns_train_kplanes_dynamic_options ;;
        lerf) _arguments -C -s $_shtab_tyro_ns_train_lerf_options ;;
        lerf-big) _arguments -C -s $_shtab_tyro_ns_train_lerf_big_options ;;
        lerf-lite) _arguments -C -s $_shtab_tyro_ns_train_lerf_lite_options ;;
        mipnerf) _shtab_tyro_ns_train_mipnerf ;;
        nerfacto) _shtab_tyro_ns_train_nerfacto ;;
        nerfacto-big) _shtab_tyro_ns_train_nerfacto_big ;;
        nerfacto-huge) _shtab_tyro_ns_train_nerfacto_huge ;;
        nerfgs) _arguments -C -s $_shtab_tyro_ns_train_nerfgs_options ;;
        nerfplayer-nerfacto) _arguments -C -s $_shtab_tyro_ns_train_nerfplayer_nerfacto_options ;;
        nerfplayer-ngp) _arguments -C -s $_shtab_tyro_ns_train_nerfplayer_ngp_options ;;
        nerfsh) _arguments -C -s $_shtab_tyro_ns_train_nerfsh_options ;;
        neus) _shtab_tyro_ns_train_neus ;;
        neus-facto) _shtab_tyro_ns_train_neus_facto ;;
        phototourism) _shtab_tyro_ns_train_phototourism ;;
        pynerf) _arguments -C -s $_shtab_tyro_ns_train_pynerf_options ;;
        pynerf-occupancy-grid) _arguments -C -s $_shtab_tyro_ns_train_pynerf_occupancy_grid_options ;;
        pynerf-synthetic) _arguments -C -s $_shtab_tyro_ns_train_pynerf_synthetic_options ;;
        seathru-nerf) _shtab_tyro_ns_train_seathru_nerf ;;
        seathru-nerf-lite) _shtab_tyro_ns_train_seathru_nerf_lite ;;
        semantic-nerfw) _shtab_tyro_ns_train_semantic_nerfw ;;
        signerf) _arguments -C -s $_shtab_tyro_ns_train_signerf_options ;;
        signerf_nerfacto) _arguments -C -s $_shtab_tyro_ns_train_signerf_nerfacto_options ;;
        splatfacto) _shtab_tyro_ns_train_splatfacto ;;
        splatfacto-big) _shtab_tyro_ns_train_splatfacto_big ;;
        splatfacto-w) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_w_options ;;
        tensorf) _shtab_tyro_ns_train_tensorf ;;
        tetra-nerf) _arguments -C -s $_shtab_tyro_ns_train_tetra_nerf_options ;;
        tetra-nerf-original) _arguments -C -s $_shtab_tyro_ns_train_tetra_nerf_original_options ;;
        vanilla-nerf) _shtab_tyro_ns_train_vanilla_nerf ;;
        volinga) _arguments -C -s $_shtab_tyro_ns_train_volinga_options ;;
        water) _shtab_tyro_ns_train_water ;;
        water-big) _shtab_tyro_ns_train_water_big ;;
        water-ex) _shtab_tyro_ns_train_water_ex ;;
        water-splatting) _shtab_tyro_ns_train_water_splatting ;;
        water-splatting-big) _shtab_tyro_ns_train_water_splatting_big ;;
        zipnerf) _arguments -C -s $_shtab_tyro_ns_train_zipnerf_options ;;
      esac
  esac
}

# Completion dispatcher for `ns-train depth-nerfacto`: offers the model's own
# flags, then forwards completion to the selected dataparser's option array.
_shtab_tyro_ns_train_depth_nerfacto() {
  local context state line curcontext="$curcontext"
  local _spec_multi='(-)*' _spec_rest='(*)'

  # Install the subcommand/state specs only on the first invocation: skip
  # when a one-or-more or remainder spec already sits in the options array.
  if ((${_shtab_tyro_ns_train_depth_nerfacto_options[(I)${(q)_spec_multi}*]} + ${_shtab_tyro_ns_train_depth_nerfacto_options[(I)${(q)_spec_rest}*]} == 0)); then
    _shtab_tyro_ns_train_depth_nerfacto_options+=(': :_shtab_tyro_ns_train_depth_nerfacto_commands' '*::: :->depth-nerfacto')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_depth_nerfacto_options

  case $state in
    depth-nerfacto)
      # Re-anchor parsing on the dataparser word and tag the context with it.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_depth_nerfacto-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each dataparser's option array is named after it with dashes
          # mapped to underscores; (P) expands the array indirectly.
          local _dp_options_var="_shtab_tyro_ns_train_depth_nerfacto_${line[1]//-/_}_options"
          _arguments -C -s ${(P)_dp_options_var}
          ;;
      esac
  esac
}

# Completion dispatcher for `ns-train dnerf`: offers the model's own flags,
# then forwards completion to the selected dataparser's option array.
_shtab_tyro_ns_train_dnerf() {
  local context state line curcontext="$curcontext"
  local _spec_multi='(-)*' _spec_rest='(*)'

  # Install the subcommand/state specs only on the first invocation: skip
  # when a one-or-more or remainder spec already sits in the options array.
  if ((${_shtab_tyro_ns_train_dnerf_options[(I)${(q)_spec_multi}*]} + ${_shtab_tyro_ns_train_dnerf_options[(I)${(q)_spec_rest}*]} == 0)); then
    _shtab_tyro_ns_train_dnerf_options+=(': :_shtab_tyro_ns_train_dnerf_commands' '*::: :->dnerf')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_dnerf_options

  case $state in
    dnerf)
      # Re-anchor parsing on the dataparser word and tag the context with it.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_dnerf-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each dataparser's option array is named after it with dashes
          # mapped to underscores; (P) expands the array indirectly.
          local _dp_options_var="_shtab_tyro_ns_train_dnerf_${line[1]//-/_}_options"
          _arguments -C -s ${(P)_dp_options_var}
          ;;
      esac
  esac
}

# Completion dispatcher for `ns-train instant-ngp`: offers the model's own
# flags, then forwards completion to the selected dataparser's option array.
_shtab_tyro_ns_train_instant_ngp() {
  local context state line curcontext="$curcontext"
  local _spec_multi='(-)*' _spec_rest='(*)'

  # Install the subcommand/state specs only on the first invocation: skip
  # when a one-or-more or remainder spec already sits in the options array.
  if ((${_shtab_tyro_ns_train_instant_ngp_options[(I)${(q)_spec_multi}*]} + ${_shtab_tyro_ns_train_instant_ngp_options[(I)${(q)_spec_rest}*]} == 0)); then
    _shtab_tyro_ns_train_instant_ngp_options+=(': :_shtab_tyro_ns_train_instant_ngp_commands' '*::: :->instant-ngp')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_instant_ngp_options

  case $state in
    instant-ngp)
      # Re-anchor parsing on the dataparser word and tag the context with it.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_instant_ngp-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each dataparser's option array is named after it with dashes
          # mapped to underscores; (P) expands the array indirectly.
          local _dp_options_var="_shtab_tyro_ns_train_instant_ngp_${line[1]//-/_}_options"
          _arguments -C -s ${(P)_dp_options_var}
          ;;
      esac
  esac
}

# Completion dispatcher for `ns-train instant-ngp-bounded`: offers the model's
# own flags, then forwards completion to the selected dataparser's options.
_shtab_tyro_ns_train_instant_ngp_bounded() {
  local context state line curcontext="$curcontext"
  local _spec_multi='(-)*' _spec_rest='(*)'

  # Install the subcommand/state specs only on the first invocation: skip
  # when a one-or-more or remainder spec already sits in the options array.
  if ((${_shtab_tyro_ns_train_instant_ngp_bounded_options[(I)${(q)_spec_multi}*]} + ${_shtab_tyro_ns_train_instant_ngp_bounded_options[(I)${(q)_spec_rest}*]} == 0)); then
    _shtab_tyro_ns_train_instant_ngp_bounded_options+=(': :_shtab_tyro_ns_train_instant_ngp_bounded_commands' '*::: :->instant-ngp-bounded')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_instant_ngp_bounded_options

  case $state in
    instant-ngp-bounded)
      # Re-anchor parsing on the dataparser word and tag the context with it.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_instant_ngp_bounded-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each dataparser's option array is named after it with dashes
          # mapped to underscores; (P) expands the array indirectly.
          local _dp_options_var="_shtab_tyro_ns_train_instant_ngp_bounded_${line[1]//-/_}_options"
          _arguments -C -s ${(P)_dp_options_var}
          ;;
      esac
  esac
}

# zsh completion for `ns-train mipnerf`: completes the subcommand's own
# options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_mipnerf() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_mipnerf_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_mipnerf_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_mipnerf_options+=(': :_shtab_tyro_ns_train_mipnerf_commands' '*::: :->mipnerf')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_mipnerf_options

  case $state in
    mipnerf)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_mipnerf-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_mipnerf_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train nerfacto`: completes the subcommand's own
# options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_nerfacto() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_nerfacto_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_nerfacto_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_nerfacto_options+=(': :_shtab_tyro_ns_train_nerfacto_commands' '*::: :->nerfacto')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_nerfacto_options

  case $state in
    nerfacto)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_nerfacto-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train nerfacto-big`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_nerfacto_big() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_nerfacto_big_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_nerfacto_big_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_nerfacto_big_options+=(': :_shtab_tyro_ns_train_nerfacto_big_commands' '*::: :->nerfacto-big')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_options

  case $state in
    nerfacto-big)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_nerfacto_big-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_big_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train nerfacto-huge`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_nerfacto_huge() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_nerfacto_huge_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_nerfacto_huge_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_nerfacto_huge_options+=(': :_shtab_tyro_ns_train_nerfacto_huge_commands' '*::: :->nerfacto-huge')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_options

  case $state in
    nerfacto-huge)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_nerfacto_huge-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_nerfacto_huge_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train neus`: completes the subcommand's own
# options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_neus() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_neus_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_neus_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_neus_options+=(': :_shtab_tyro_ns_train_neus_commands' '*::: :->neus')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_neus_options

  case $state in
    neus)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_neus-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_neus_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_neus_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_neus_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_neus_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_neus_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_neus_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_neus_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_neus_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_neus_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_neus_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_neus_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_neus_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_neus_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_neus_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_neus_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_neus_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train neus-facto`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_neus_facto() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_neus_facto_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_neus_facto_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_neus_facto_options+=(': :_shtab_tyro_ns_train_neus_facto_commands' '*::: :->neus-facto')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_neus_facto_options

  case $state in
    neus-facto)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_neus_facto-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_neus_facto_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train phototourism`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_phototourism() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_phototourism_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_phototourism_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_phototourism_options+=(': :_shtab_tyro_ns_train_phototourism_commands' '*::: :->phototourism')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_phototourism_options

  case $state in
    phototourism)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_phototourism-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_phototourism_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_phototourism_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_phototourism_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train seathru-nerf`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_seathru_nerf() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_seathru_nerf_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_seathru_nerf_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_seathru_nerf_options+=(': :_shtab_tyro_ns_train_seathru_nerf_commands' '*::: :->seathru-nerf')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_options

  case $state in
    seathru-nerf)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_seathru_nerf-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train seathru-nerf-lite`: completes the
# subcommand's own options, then dispatches (via the _arguments state
# machine) to the option set of the dataparser chosen as the next word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_seathru_nerf_lite() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_seathru_nerf_lite_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_seathru_nerf_lite_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_seathru_nerf_lite_options+=(': :_shtab_tyro_ns_train_seathru_nerf_lite_commands' '*::: :->seathru-nerf-lite')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_options

  case $state in
    seathru-nerf-lite)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_seathru_nerf_lite-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_seathru_nerf_lite_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train semantic-nerfw`: completes the
# subcommand's own options, then dispatches (via the _arguments state
# machine) to the option set of the dataparser chosen as the next word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_semantic_nerfw() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_semantic_nerfw_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_semantic_nerfw_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_semantic_nerfw_options+=(': :_shtab_tyro_ns_train_semantic_nerfw_commands' '*::: :->semantic-nerfw')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_options

  case $state in
    semantic-nerfw)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_semantic_nerfw-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_semantic_nerfw_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train splatfacto`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_splatfacto() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_splatfacto_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_splatfacto_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_splatfacto_options+=(': :_shtab_tyro_ns_train_splatfacto_commands' '*::: :->splatfacto')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_splatfacto_options

  case $state in
    splatfacto)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_splatfacto-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train splatfacto-big`: completes the
# subcommand's own options, then dispatches (via the _arguments state
# machine) to the option set of the dataparser chosen as the next word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_splatfacto_big() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_splatfacto_big_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_splatfacto_big_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_splatfacto_big_options+=(': :_shtab_tyro_ns_train_splatfacto_big_commands' '*::: :->splatfacto-big')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_options

  case $state in
    splatfacto-big)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_splatfacto_big-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_splatfacto_big_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train tensorf`: completes the subcommand's own
# options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_tensorf() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_tensorf_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_tensorf_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_tensorf_options+=(': :_shtab_tyro_ns_train_tensorf_commands' '*::: :->tensorf')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_tensorf_options

  case $state in
    tensorf)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_tensorf-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_tensorf_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_tensorf_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_tensorf_water_data_options ;;
      esac
  esac
}

# zsh completion for `ns-train vanilla-nerf`: completes the subcommand's
# own options, then dispatches (via the _arguments state machine) to the
# option set of the dataparser chosen as the next positional word.
# NOTE(review): AUTOMATICALLY GENERATED by shtab; regenerate rather than
# hand-edit -- comments here are for readers only.
_shtab_tyro_ns_train_vanilla_nerf() {
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the positional specs (subcommand completer + remainder -> state)
  # only if the options array does not already declare them.
  if ((${_shtab_tyro_ns_train_vanilla_nerf_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_vanilla_nerf_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_vanilla_nerf_options+=(': :_shtab_tyro_ns_train_vanilla_nerf_commands' '*::: :->vanilla-nerf')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_options

  case $state in
    vanilla-nerf)
      # Re-prepend the dataparser word so the nested _arguments call
      # below parses it as word 1 of its own command line.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      # Narrow the completion context to the chosen dataparser.
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_vanilla_nerf-$line[1]:"
      # Dispatch to the selected dataparser's option array.
      case $line[1] in
        arkit-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_arkit_data_options ;;
        blender-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_blender_data_options ;;
        colmap) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_colmap_options ;;
        dnerf-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_dnerf_data_options ;;
        dycheck-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_dycheck_data_options ;;
        instant-ngp-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_instant_ngp_data_options ;;
        minimal-parser) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_minimal_parser_options ;;
        nerfosr-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_nerfosr_data_options ;;
        nerfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_nerfstudio_data_options ;;
        nuscenes-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_nuscenes_data_options ;;
        phototourism-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_phototourism_data_options ;;
        scannet-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_scannet_data_options ;;
        scannetpp-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_scannetpp_data_options ;;
        sdfstudio-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_sdfstudio_data_options ;;
        sitcoms3d-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_sitcoms3d_data_options ;;
        water-data) _arguments -C -s $_shtab_tyro_ns_train_vanilla_nerf_water_data_options ;;
      esac
  esac
}

_shtab_tyro_ns_train_water() {
  # Complete `ns-train water ...`: offer the model's own options, then hand
  # off to the selected dataparser subcommand's option array.
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/positional specs exactly once: skip when the options
  # array already contains a one-or-more or remainder positional spec.
  if ((${_shtab_tyro_ns_train_water_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_water_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_water_options+=(': :_shtab_tyro_ns_train_water_commands' '*::: :->water')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_water_options

  case $state in
    water)
      # Re-seat the completion context on the dataparser subcommand word.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_water-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each known subcommand has an option array whose name is the
          # subcommand with hyphens mapped to underscores; expand it
          # indirectly with the (P) flag instead of one case arm apiece.
          local _opts_var=_shtab_tyro_ns_train_water_${line[1]//-/_}_options
          _arguments -C -s ${(P)_opts_var} ;;
      esac
  esac
}

_shtab_tyro_ns_train_water_big() {
  # Complete `ns-train water-big ...`: offer the model's own options, then
  # hand off to the selected dataparser subcommand's option array.
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/positional specs exactly once: skip when the options
  # array already contains a one-or-more or remainder positional spec.
  if ((${_shtab_tyro_ns_train_water_big_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_water_big_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_water_big_options+=(': :_shtab_tyro_ns_train_water_big_commands' '*::: :->water-big')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_water_big_options

  case $state in
    water-big)
      # Re-seat the completion context on the dataparser subcommand word.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_water_big-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each known subcommand has an option array whose name is the
          # subcommand with hyphens mapped to underscores; expand it
          # indirectly with the (P) flag instead of one case arm apiece.
          local _opts_var=_shtab_tyro_ns_train_water_big_${line[1]//-/_}_options
          _arguments -C -s ${(P)_opts_var} ;;
      esac
  esac
}

_shtab_tyro_ns_train_water_ex() {
  # Complete `ns-train water-ex ...`: offer the model's own options, then
  # hand off to the selected dataparser subcommand's option array.
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/positional specs exactly once: skip when the options
  # array already contains a one-or-more or remainder positional spec.
  if ((${_shtab_tyro_ns_train_water_ex_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_water_ex_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_water_ex_options+=(': :_shtab_tyro_ns_train_water_ex_commands' '*::: :->water-ex')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_water_ex_options

  case $state in
    water-ex)
      # Re-seat the completion context on the dataparser subcommand word.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_water_ex-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each known subcommand has an option array whose name is the
          # subcommand with hyphens mapped to underscores; expand it
          # indirectly with the (P) flag instead of one case arm apiece.
          local _opts_var=_shtab_tyro_ns_train_water_ex_${line[1]//-/_}_options
          _arguments -C -s ${(P)_opts_var} ;;
      esac
  esac
}

_shtab_tyro_ns_train_water_splatting() {
  # Complete `ns-train water-splatting ...`: offer the model's own options,
  # then hand off to the selected dataparser subcommand's option array.
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/positional specs exactly once: skip when the options
  # array already contains a one-or-more or remainder positional spec.
  if ((${_shtab_tyro_ns_train_water_splatting_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_water_splatting_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_water_splatting_options+=(': :_shtab_tyro_ns_train_water_splatting_commands' '*::: :->water-splatting')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_water_splatting_options

  case $state in
    water-splatting)
      # Re-seat the completion context on the dataparser subcommand word.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_water_splatting-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each known subcommand has an option array whose name is the
          # subcommand with hyphens mapped to underscores; expand it
          # indirectly with the (P) flag instead of one case arm apiece.
          local _opts_var=_shtab_tyro_ns_train_water_splatting_${line[1]//-/_}_options
          _arguments -C -s ${(P)_opts_var} ;;
      esac
  esac
}

_shtab_tyro_ns_train_water_splatting_big() {
  # Complete `ns-train water-splatting-big ...`: offer the model's own
  # options, then hand off to the selected dataparser subcommand's options.
  local context state line curcontext="$curcontext" one_or_more='(-)*' remainder='(*)'

  # Append the subcommand/positional specs exactly once: skip when the options
  # array already contains a one-or-more or remainder positional spec.
  if ((${_shtab_tyro_ns_train_water_splatting_big_options[(I)${(q)one_or_more}*]} + ${_shtab_tyro_ns_train_water_splatting_big_options[(I)${(q)remainder}*]} == 0)); then  # noqa: E501
    _shtab_tyro_ns_train_water_splatting_big_options+=(': :_shtab_tyro_ns_train_water_splatting_big_commands' '*::: :->water-splatting-big')
  fi
  _arguments -C -s $_shtab_tyro_ns_train_water_splatting_big_options

  case $state in
    water-splatting-big)
      # Re-seat the completion context on the dataparser subcommand word.
      words=($line[1] "${words[@]}")
      (( CURRENT += 1 ))
      curcontext="${curcontext%:*:*}:_shtab_tyro_ns_train_water_splatting_big-$line[1]:"
      case $line[1] in
        arkit-data|blender-data|colmap|dnerf-data|dycheck-data|instant-ngp-data|minimal-parser|nerfosr-data|nerfstudio-data|nuscenes-data|phototourism-data|scannet-data|scannetpp-data|sdfstudio-data|sitcoms3d-data|water-data)
          # Each known subcommand has an option array whose name is the
          # subcommand with hyphens mapped to underscores; expand it
          # indirectly with the (P) flag instead of one case arm apiece.
          local _opts_var=_shtab_tyro_ns_train_water_splatting_big_${line[1]//-/_}_options
          _arguments -C -s ${(P)_opts_var} ;;
      esac
  esac
}



typeset -A opt_args

# Decide how this file was loaded and act accordingly.
case $zsh_eval_context[-1] in
  eval)
    # Pulled in via eval/source/. — register the function with compdef so it
    # fires on future `ns-train` completions.
    compdef _shtab_tyro_ns_train -N ns-train
    ;;
  *)
    # Autoloaded from fpath — zsh is completing right now; run directly.
    _shtab_tyro_ns_train "$@"
    ;;
esac

