# Experiment configuration for a 6-qubit AAE encoder training run.
# NOTE(review): this stanza appears to be an echoed config at the top of a run
# log (program output follows it); comments describe the visible keys only.

# Run identifier; interpolated into the checkpoint paths below via ${version}.
version: AAE_encoder_6qubits
# Torch device string used for the run.
device: cpu
# RNG seed for reproducibility.
seed: 42
# Number of training epochs.
n_epochs: 10
# Noise controls; both 0 here, so noise is disabled — presumably they scale an
# injected-noise process, TODO confirm against the trainer.
noise_factor: 0
noisy_probability: 0
state_generator:
  # Loss selector; also interpolated into save_path as ${state_generator.loss}.
  # The log below shows the resulting criterion is FidLossDotProdAAE.
  loss: DotProd
  # Training steps — presumably optimizer steps per state; TODO confirm.
  n_train_step: 100
  aae_encoder:
    # Quantum simulator backend name — looks like PennyLane's default.qubit
    # device; confirm against the encoder implementation.
    q_device: default.qubit
    # Circuit width.
    n_qubits: 6
    # Depth of the encoder ansatz.
    n_encoder_layers: 20
    # Noisy simulation disabled; the channel strengths below are both 0 and
    # presumably ignored while noisy is false — TODO confirm.
    noisy: false
    AmplitudeDamping: 0
    DepolarizingChannel: 0
  optimizer:
    # Optimizer name and its constructor kwargs.
    name: Adam
    args:
      lr: 0.01
dataset:
  # FractalDB image dataset root (path suggests 60 categories, 1000 instances).
  root: ./FractalDB/fractaldb_cat60_ins1000
  # Torchvision-style transform name — TODO confirm how it is resolved.
  transform: ToTensor
checkpoint:
  # ${...} placeholders suggest OmegaConf-style interpolation — TODO confirm.
  logs: ./logs/superencoder/${version}
  save_path: ./trained_models/superencoder_${version}_${state_generator.loss}.pt
dataloader:
  batch_size: 32
  # 0 workers → data loading happens in the main process.
  num_workers: 0
  pin_memory: false

AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_6-qubits
Testing Dataset: exponential_rate1_6-qubits
Testing Dataset: lognormal_mean0std1_6-qubits
Testing Dataset: normal_mean0.3std0.5_6-qubits
Testing Dataset: uniform_low0high1_6-qubits
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
{'beta_a1b1_6-qubits': 0.9999892473287297, 'exponential_rate1_6-qubits': 0.9999738901264312, 'lognormal_mean0std1_6-qubits': 0.9999689982098274, 'normal_mean0.3std0.5_6-qubits': 0.9999524962915375, 'uniform_low0high1_6-qubits': 0.9999919127013125}
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_6-qubits
20.181015200381808
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_6-qubits
0.1526855963820708
