# Configuration for an 8-qubit AAE encoder training run.
# NOTE(review): "AAE" presumably stands for approximate amplitude encoding —
# confirm against the consuming code. Everything after this config block in
# the file is captured runtime output, not configuration.
version: AAE_encoder_8qubits
# Compute device for the run (cpu here; presumably "cuda" also accepted — verify).
device: cpu
# RNG seed for reproducibility.
seed: 42
n_epochs: 10
# Noise injection disabled for this run (both factor and probability are zero).
noise_factor: 0
noisy_probability: 0
state_generator:
  # Loss selector; the log below shows this resolves to FidLossDotProdAAE
  # (a fidelity / dot-product criterion).
  loss: DotProd
  # Optimization steps per training run.
  n_train_step: 100
  aae_encoder:
    # Quantum simulator backend; "default.qubit" matches PennyLane's built-in
    # state-vector simulator — TODO confirm the framework in the caller.
    q_device: default.qubit
    n_qubits: 8
    n_encoder_layers: 40
    # Noise channels off; the two channel strengths below are zero accordingly.
    noisy: false
    AmplitudeDamping: 0
    DepolarizingChannel: 0
  optimizer:
    # Optimizer name plus constructor kwargs (name presumably looked up in
    # torch.optim — verify against the training code).
    name: Adam
    args:
      lr: 0.01
dataset:
  # Dataset root; directory name suggests FractalDB with 60 categories and
  # 1000 instances per category — confirm.
  root: ./FractalDB/fractaldb_cat60_ins1000
  transform: ToTensor
checkpoint:
  # ${...} placeholders look like OmegaConf-style interpolation of other keys
  # in this document — TODO confirm the config loader resolves them.
  logs: ./logs/superencoder/${version}
  save_path: ./trained_models/superencoder_${version}_${state_generator.loss}.pt
dataloader:
  batch_size: 32
  # num_workers 0 → data loading in the main process; pin_memory off (cpu run).
  num_workers: 0
  pin_memory: false

AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_8-qubits
Testing Dataset: exponential_rate1_8-qubits
Testing Dataset: lognormal_mean0std1_8-qubits
Testing Dataset: normal_mean0.3std0.5_8-qubits
Testing Dataset: uniform_low0high1_8-qubits
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
{'beta_a1b1_8-qubits': 0.9995369811499091, 'exponential_rate1_8-qubits': 0.9990556714420801, 'lognormal_mean0std1_8-qubits': 0.9988614614659161, 'normal_mean0.3std0.5_8-qubits': 0.9984511338909869, 'uniform_low0high1_8-qubits': 0.9996153665893993}
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_8-qubits
59.419353025998134
AAE_StateGenerator(
  (criterion): FidLossDotProdAAE()
  (aae_encoder): <Quantum Torch Layer: func=aae_encoder>
)
Testing Dataset: beta_a1b1_8-qubits
0.4035022883763304
