diff --git a/benchmarks/LOWQ2/signal_training/Convert_Data.C b/benchmarks/LOWQ2/signal_training/Convert_Data.C
index 3b8cc4314a05a2abfbc52fdf464597821019e09f..2a418e3f0bfb57e8ddbd041b89b7094de4d1c0e5 100644
--- a/benchmarks/LOWQ2/signal_training/Convert_Data.C
+++ b/benchmarks/LOWQ2/signal_training/Convert_Data.C
@@ -11,7 +11,7 @@
 #include "PixelHit.hpp"
 
 
-int Convert_Data(TString inName="output/Out_tpx4_big.root", TString outName="output/Out_Convert_Big.root") {
+int Convert_Data(TString inName="output/Out_tpx4_genprop.root", TString outName="output/Out_Convert_genprop.root") {
 
 
 
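
A quick way to check that the renamed defaults line up with the rest of this change is to open the converted file and confirm it carries the branches the training and test scripts read. A minimal sketch, assuming the conversion has already been run and the tree is called 'events' as in the Python scripts below:

    import uproot

    # Hedged sanity check: assumes Convert_Data.C was run with the new defaults and
    # produced output/Out_Convert_genprop.root with the flat 'events' tree that
    # train_signal.py and the test scripts read.
    with uproot.open("output/Out_Convert_genprop.root") as infile:
        tree = infile["events"]
        print("entries:", tree.num_entries)
        expected = {"x", "y", "px", "py", "pixel_x", "pixel_y", "charge", "time"}
        missing = expected - set(tree.keys())
        print("missing branches:", missing or "none")
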
diff --git a/benchmarks/LOWQ2/signal_training/allpix2_config.cfg b/benchmarks/LOWQ2/signal_training/allpix2_config.cfg
index 53c6aa86334c75dce8e11f3f3a7f4c7283f10b3c..4f2c7d5d1e3794b8c49802dda526c956f3248c0e 100644
--- a/benchmarks/LOWQ2/signal_training/allpix2_config.cfg
+++ b/benchmarks/LOWQ2/signal_training/allpix2_config.cfg
@@ -16,13 +16,19 @@ max_step_length = 0.1um
 
 [ElectricFieldReader]
 model="linear"
-bias_voltage = -200V
+bias_voltage=-150V
+depletion_voltage=-100V
 #output_plots = true
 
-[ProjectionPropagation]
+[GenericPropagation]
+#type = "timepix"
 temperature = 293K
 charge_per_step = 25
 
+# [ProjectionPropagation]
+# temperature = 293K
+# charge_per_step = 25
+
 #[WeightingPotentialReader]
 #model = "pad"
 #[TransientPropagation]
@@ -35,6 +41,19 @@ charge_per_step = 25
 timestep = 0.1ns
 #output_pulsegraphs = true
 
+# [DefaultDigitizer]
+# electronics_noise = 77e
+# threshold = 1000e
+# threshold_smearing = 35e
+# qdc_smearing = 0e
+# qdc_resolution = 8
+# qdc_slope = 180e
+# qdc_offset = -1000e
+# output_plots = true
+
+# tdc_slope = 0.195ns
+# tdc_resolution = 8
+
 [CSADigitizer]
 model = "simple"
 feedback_capacitance = 5e-15C/V
@@ -56,5 +75,5 @@ mode = "gui"
 accumulate = 1
 
 [ROOTObjectWriter]
-file_name = "Out_tpx4_big.root"
+file_name = "Out_tpx4_genprop.root"
 include = ["PixelCharge","PixelHit","MCParticle"]
\ No newline at end of file
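
Because this hunk swaps ProjectionPropagation for GenericPropagation and changes the bias voltage, comparing the digitised charge before and after the switch is a useful cross-check. The sketch below is only an illustration: it assumes both converted files (the old Out_Convert_Big.root and the new Out_Convert_genprop.root) exist with the flat 'events' tree used elsewhere in this change.

    import numpy as np
    import uproot
    import matplotlib.pyplot as plt

    # Hedged comparison of per-pixel charge between the two propagation setups.
    # File names are taken from this change set; both must have been converted.
    files = {
        "ProjectionPropagation (old)": "output/Out_Convert_Big.root",
        "GenericPropagation (new)": "output/Out_Convert_genprop.root",
    }

    plt.figure()
    for label, path in files.items():
        with uproot.open(path) as f:
            charge = f["events"]["charge"].array(library="np", entry_stop=100000)
            charge = np.concatenate([np.ravel(c) for c in charge])  # flatten per-event grids
        plt.hist(charge[charge > 0], bins=50, histtype="step", label=label)
    plt.xlabel("Charge")
    plt.ylabel("Pixels")
    plt.legend()
    plt.savefig("plots/charge_projection_vs_generic.png")
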
diff --git a/benchmarks/LOWQ2/signal_training/model_ad.py b/benchmarks/LOWQ2/signal_training/model_ad.py
index 798824ec1be5de60a9c92ae769903829b2b7a47d..d8965c5251fd2cdf56a1360b9f6de969b88b49f4 100644
--- a/benchmarks/LOWQ2/signal_training/model_ad.py
+++ b/benchmarks/LOWQ2/signal_training/model_ad.py
@@ -15,8 +15,10 @@ class Encoder(tf.keras.Model):
         self.encode = tf.keras.Sequential([
             tfkl.InputLayer(shape=(flat_shape+nconditions,)),
             self.normalizer,
-            tfkl.Dense(128, activation='relu'),
-            tfkl.Dense(64, activation='relu'),
+            tfkl.Dense(1024, activation='relu'),
+            tfkl.Dense(1024, activation='relu'),
+            tfkl.Dense(256, activation='relu'),
+            tfkl.Dense(256, activation='relu'),
             tfkl.Dense(2*latent_dim),
         ])
 
@@ -38,8 +40,10 @@ class Decoder(tf.keras.Model):
         self.normalizer = tfkl.Normalization(name='decode_normalizer')
         self.decode = tf.keras.Sequential([            
             tfkl.InputLayer(shape=(latent_dim+nconditions,),name='input_layer'),
-            tfkl.Dense(64, activation='relu'),
-            tfkl.Dense(128, activation='relu'),
+            tfkl.Dense(256, activation='relu'),
+            tfkl.Dense(256, activation='relu'),
+            tfkl.Dense(1024, activation='relu'),
+            tfkl.Dense(1024, activation='relu'),
             tfkl.Dense(flat_shape, name='output_layer')
         ])
 
@@ -59,8 +63,10 @@ class Adversarial(tf.keras.Model):
         super(Adversarial, self).__init__()
         self.reconstruct_conditions = tf.keras.Sequential([
             tfkl.InputLayer(shape=(latent_dim,)),
-            tfkl.Dense(128, activation='relu'),
+            tfkl.Dense(1024, activation='relu'),
+            tfkl.Dense(256, activation='relu'),
             tfkl.Dense(64, activation='relu'),
+            tfkl.Dense(16, activation='relu'),
             tfkl.Dense(nconditions, activation='linear')
         ])
 
@@ -105,16 +111,6 @@ class VAE(tf.keras.Model):
         reconstruction = self.decoder(conditions,mean)
         #advisarial_conditions = self.adversarial(mean)
 
-
-        # Reconstruction loss
-        #reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.math.squared_difference(x, reconstruction)))
-        #kl_loss = -0.5 * tf.reduce_sum(1 + logvar - tf.square(mean) - tf.exp(logvar), axis=1)
-        #adversarial_loss = tf.reduce_mean(tf.reduce_sum(tf.math.squared_difference(conditions, advisarial_conditions)))
-        
-        #total_loss = tf.reduce_mean(reconstruction_loss + kl_loss - adversarial_loss)
-        
-        # add loss
-        #self.add_loss(total_loss)
         return reconstruction
     
     def train_step(self, input):
@@ -205,10 +201,15 @@ class LatentSpace(tf.keras.Model):
         super(LatentSpace, self).__init__()
         self.flat_shape  = original_model.flat_shape
         self.nconditions = original_model.nconditions
-        self.input_layer = tfkl.InputLayer(input_shape=(self.flat_shape+self.nconditions,))
+        self.input_layer = tfkl.InputLayer(shape=(self.flat_shape+self.nconditions,))
         self.encoder    = original_model.encoder
         self.output_names = ['output']
 
+    def reparameterize(self, mean, logvar):
+        eps = tf.random.normal(shape=tf.shape(mean))
+        return eps * tf.exp(logvar * .5) + mean
+    
     def call(self, input):
         mean, logvar = self.encoder(input)
-        return mean, logvar
\ No newline at end of file
+        z = self.reparameterize(mean, logvar)
+        return z
\ No newline at end of file
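
LatentSpace.call now samples through the reparameterization trick instead of returning the raw mean and log-variance. As a standalone illustration (plain NumPy, not the project's TensorFlow code), z = mean + exp(0.5 * logvar) * eps reproduces the intended mean and variance:

    import numpy as np

    # Minimal NumPy sketch of the reparameterization step added to LatentSpace.call:
    # z = mean + exp(0.5 * logvar) * eps with eps ~ N(0, 1).
    rng = np.random.default_rng(0)
    mean, logvar = 1.5, np.log(0.25)           # target distribution N(1.5, 0.25)
    eps = rng.standard_normal(100_000)
    z = eps * np.exp(0.5 * logvar) + mean

    print("sample mean:", z.mean())            # ~1.5
    print("sample variance:", z.var())         # ~0.25
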
diff --git a/benchmarks/LOWQ2/signal_training/test_model.py b/benchmarks/LOWQ2/signal_training/test_model.py
index b9b45b993aada303c7ffc5c206cc8191d9ede6ec..ac72c187945bef5f64785bab0b437518dbe196cf 100644
--- a/benchmarks/LOWQ2/signal_training/test_model.py
+++ b/benchmarks/LOWQ2/signal_training/test_model.py
@@ -3,7 +3,7 @@ import numpy as np
 import onnxruntime as ort
 
 output_dir = 'plots/'
-model_base = "model_digitization"
+model_base = "model_digitization2"
 model_name = model_base+".onnx"
 # Load the ONNX model
 sess = ort.InferenceSession(model_name)
@@ -45,7 +45,7 @@ for j, input_tensor in enumerate(input_tensors[:,:,0:4]):
         output = output[0]
         output = output.reshape((1, 2, data_grid_size, data_grid_size))
 
-        round_output = np.round(output)
+        round_output = np.ceil(output-0.2)
         #round_output = output
 
         # Plot the output grid for the first channel
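
Switching from np.round to np.ceil(output - 0.2) effectively lowers the hit threshold from about 0.5 to 0.2 (and sidesteps np.round's half-to-even behaviour). A small NumPy example of the difference:

    import numpy as np

    # Hedged illustration of the changed rounding rule: np.round keeps a pixel only
    # above ~0.5 (half-to-even), while np.ceil(x - 0.2) promotes anything above 0.2.
    x = np.array([0.2, 0.21, 0.4, 0.5, 0.6, 1.1])
    print(np.round(x))        # [0. 0. 0. 0. 1. 1.]
    print(np.ceil(x - 0.2))   # [0. 1. 1. 1. 1. 1.]
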
diff --git a/benchmarks/LOWQ2/signal_training/test_model_latent_space.py b/benchmarks/LOWQ2/signal_training/test_model_latent_space.py
index 4d7c877869d34b67996c093e2e6d67f187479a3e..606ee1742b0cc5b678229c122ead7b22b77345e5 100644
--- a/benchmarks/LOWQ2/signal_training/test_model_latent_space.py
+++ b/benchmarks/LOWQ2/signal_training/test_model_latent_space.py
@@ -1,4 +1,5 @@
 import matplotlib.pyplot as plt
+import matplotlib.colors as colors
 import numpy as np
 import onnxruntime as ort
 import uproot
@@ -6,18 +7,19 @@ import pandas as pd
 import tensorflow as tf
 
 output_dir = 'plots/'
-model_base = "model_tpx4_new3"
+model_base = "model_digitization"
 model_name = model_base+"_latent.onnx"
 # Load the ONNX model
 sess = ort.InferenceSession(model_name)
 
 condition_columns    = ['x', 'y', 'px', 'py']
+condition_ranges     = [[0, 0.5], [0, 0.5], [-0.2, 0.2], [-0.2, 0.2]]
 nConditions = len(condition_columns)
 
 input_name = sess.get_inputs()[0].name
 
 # Load data from the ROOT file
-file_path = 'output/Out_Convert_tpx4-6.root'
+file_path = 'output/Out_Convert_Big.root'
 output_dir = 'plots/'
 
 # Assuming the ROOT file structure: MCParticles and PixelHits trees
@@ -25,35 +27,15 @@ infile = uproot.open(file_path)
 tree  = infile['events']
 
 # Extracting data from the ROOT file
-df = tree.arrays(['x', 'y', 'px', 'py', 'pixel_x', 'pixel_y', 'charge', 'time'], library='pd')
+df = tree.arrays(['x', 'y', 'px', 'py', 'pixel_x', 'pixel_y', 'charge', 'time'], entry_stop=100000)
 
 data_grid_size = 6
 
-input_data = df[condition_columns].values.astype(np.float32)
-
-# Define a function to create a sparse tensor from a row
-def row_to_sparse_tensor(row):
-    charge_indices = np.column_stack([row['pixel_x'], row['pixel_y'], np.zeros(len(row['pixel_x']))])
-    time_indices   = np.column_stack([row['pixel_x'], row['pixel_y'], np.ones(len(row['pixel_x']))])
-    indices        = np.concatenate([charge_indices, time_indices])
-    values         = np.concatenate([row['charge'], row['time']])
-    dense_shape    = [data_grid_size, data_grid_size, 2]
-    sparse_tensor  = tf.sparse.reorder(tf.sparse.SparseTensor(indices, values, dense_shape))
-    return tf.sparse.to_dense(sparse_tensor)
-
-# Apply the function to each row of the DataFrame
-#target_tensors = df.apply(row_to_sparse_tensor, axis=1)
-target_tensors = tf.stack(df.apply(row_to_sparse_tensor, axis=1).to_list())
-
-# Reshape the target_tensors so that other than the event dimension, the other dimensions are the flat
-target_tensors = tf.reshape(target_tensors, (target_tensors.shape[0], -1)).numpy()
-
-# Create input tensors
+target_tensors = np.concatenate([df['charge'].to_numpy().astype(np.float32), df['time'].to_numpy().astype(np.float32)], axis=1)
+
 conditions_tensors = df[condition_columns].to_numpy()
-#conditions_tensors = df[['x', 'y']].to_numpy()
-#conditions_tensors = df[[]].to_numpy()
-
-# Concatenate the conditions_tensors and target_tensors along final axis
+conditions_tensors = np.array([list(t) for t in conditions_tensors]).astype(np.float32)
+
 input_tensors = np.concatenate([conditions_tensors, target_tensors], axis=1)
 
 # Predict the output for the input tensor
@@ -64,20 +46,36 @@ nOutputs = output[0].shape[-1]
 # Plot 2D scatter plots showing the correlation between the first 2 latent dimensions
 plt.figure()
 plt.scatter(output[0][:,0], output[0][:,1])
+plt.xlim([-5, 5])
+plt.ylim([-5, 5])
 plt.xlabel('Latent dimension 1')
 plt.ylabel('Latent dimension 2')
-plt.savefig(output_dir + 'latent_space.png')
+plt.savefig(output_dir + model_base + '_latent_space-01.png')
+
+#Plot a histogram of each latent dimension on a grid
+fig, axs = plt.subplots(int(nOutputs/5),5, figsize=(20, 10))
+for i in range(nOutputs):
+    row=int(i//5)
+    col=int(i%5)
+    axs[row,col].hist(output[0][:,i], bins=400, range=(-5,5))
+    #axs[row,col].set_yscale('log')
+    axs[row,col].set_title('Latent dimension '+str(i))
 
 
 
+plt.savefig(output_dir + model_base + '_latent_histograms.png')
+
 # Plot 2D scatter plots showing the correlation between the conditions and output dimensions
 # A matrix of images
 
-fig, axs = plt.subplots(nConditions,nOutputs , figsize=(20, 10))
 for i in range(nConditions):
+    out_tag = model_base + '_latent_hist_'+condition_columns[i]
+    nRows = 5
+    fig, axs = plt.subplots(nRows, nOutputs//nRows, figsize=(20, 10))
     for j in range(nOutputs):
-        axs[i,j].scatter(input_data[:,i], output[0][:,j])
-        axs[i,j].set_xlabel(condition_columns[i])
-        axs[i,j].set_ylabel('Output '+str(j))
-plt.savefig(output_dir + 'scatter.png')
-
+        col = int(j//nRows)
+        row = int(j%nRows)
+        axs[row,col].hist2d(conditions_tensors[:,i], output[0][:,j], bins=(200, 200), cmap=plt.cm.jet, range=[condition_ranges[i], [-5, 5]])#, norm=colors.LogNorm())
+        axs[row,col].set_xlabel(condition_columns[i])
+        axs[row,col].set_ylabel('Output '+str(j))
+    plt.savefig(output_dir + out_tag + '.png')
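
As a numeric complement to the 2D histograms, the linear correlation between each condition and each latent dimension can be summarised in one matrix. A minimal sketch that assumes output[0], conditions_tensors, nConditions and condition_columns are in memory as defined earlier in this script:

    import numpy as np

    # Hedged follow-up to the hist2d grids: one matrix of condition/latent correlations.
    # Relies on `conditions_tensors` (N, nConditions) and `output[0]` (N, nOutputs)
    # from the script above.
    latent = output[0]
    corr = np.corrcoef(conditions_tensors.T, latent.T)[:nConditions, nConditions:]
    for i, name in enumerate(condition_columns):
        print(name, np.round(corr[i], 3))
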
diff --git a/benchmarks/LOWQ2/signal_training/test_model_ranges.py b/benchmarks/LOWQ2/signal_training/test_model_ranges.py
index b9ca4f27995dae70febddf0d29fd5e448912b71e..e3b7686b9b870d7924c9f56a5c08148f22ce38f1 100644
--- a/benchmarks/LOWQ2/signal_training/test_model_ranges.py
+++ b/benchmarks/LOWQ2/signal_training/test_model_ranges.py
@@ -24,7 +24,7 @@ infile = uproot.open(file_path)
 tree  = infile['events']
 
 # Extracting data from the ROOT file
-df = tree.arrays(['x', 'y', 'px', 'py', 'pixel_x', 'pixel_y', 'charge', 'time'], library='pd')
+df = tree.arrays(['x', 'y', 'px', 'py', 'pixel_x', 'pixel_y', 'charge', 'time'], library='pd')#, entry_stop=10000)
 
 input_data = df[['x', 'y', 'px', 'py']].values.astype(np.float32)
 #input_data = df[['x', 'y']].values.astype(np.float32)
@@ -34,8 +34,12 @@ output = sess.run(None, {input_name: input_data})
 output = output[0]
 output = output.reshape((len(input_data), 2, data_grid_size, data_grid_size))
 
-round_output = np.round(output)
-round_output = np.transpose(round_output, (0, 2, 3, 1))
+output = np.transpose(output, (0, 2, 3, 1))
+
+
+round_output = np.ceil(output-0.2)
+# round_output[:,:,:,0] = np.ceil(output[:,:,:,0])
+# round_output[:,:,:,1] = np.round(output[:,:,:,1])
 
 # Calculate the number of pixels with hits > 0 for each entry
 output_nhits = np.sum(round_output[:,:,:,0] > 0, axis=(1,2))
@@ -53,13 +57,15 @@ plt.xlabel('Charge')
 plt.ylabel('Number of entries')
 plt.savefig(output_dir + 'charge_distribution_pred.png')
 
+
 # Plot the time distribution
 plt.figure()
-plt.hist(round_output[round_output[:,:,:,0] > 0][...,1], bins=30, range=(0, 30))
+plt.hist(round_output[round_output[:,:,:,1] > 0][...,1], bins=30, range=(0, 30))
 plt.xlabel('Time')
 plt.ylabel('Number of entries')
 plt.savefig(output_dir + 'time_distribution_pred.png')
 
+
 input_tensors = np.array([[0.5, 0.5, 0.0, 0.0],[0.25, 0.25, 0.0, 0.0],[0.0, 0.0,-0.05,-0.05],[0.5, 0.1,0.0,0.0],[0.0, 0.5,0.05,0.05],[0.25, 0.5,0.05,0.05]], dtype=np.float32)
 
 input_range = np.array([0.05,0.05,0.01,0.01])
@@ -86,7 +92,7 @@ for j, input_tensor in enumerate(input_tensors[:,0:4]):
 
     # Plot the length of pixel_x for each entry
     plt.figure()
-    plt.hist(np.sum(round_output[input_indeces][:,:,:,0] > 0, axis=(1,2)), bins=12, range=(0, 12))
+    plt.hist(np.sum(round_output[input_indeces][:,:,:,1] > 0, axis=(1,2)), bins=12, range=(0, 12))
     plt.xlabel('Number of hit pixels')
     plt.ylabel('Number of entries')
     plt.savefig(output_dir + 'num_hit_pred_'+output_extension)
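
The selection masks now index channel 1 (time) rather than channel 0 (charge) after the transpose to (N, y, x, channel). A short sketch with placeholder data to keep the channel convention straight; the axis interpretation is read off this script, not the model definition:

    import numpy as np

    # Hedged illustration of the (N, grid, grid, channel) layout used above after
    # np.transpose(output, (0, 2, 3, 1)): channel 0 = charge, channel 1 = time.
    # Placeholder data only; shapes follow this script's data_grid_size = 6.
    rng = np.random.default_rng(0)
    pred = rng.random((4, 2, 6, 6))              # raw model output: (N, channel, y, x)
    pred = np.transpose(pred, (0, 2, 3, 1))      # -> (N, y, x, channel)

    charge = pred[..., 0]
    time   = pred[..., 1]
    nhits_per_event = np.sum(np.ceil(time - 0.2) > 0, axis=(1, 2))
    print(pred.shape, nhits_per_event)
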
diff --git a/benchmarks/LOWQ2/signal_training/train_signal.py b/benchmarks/LOWQ2/signal_training/train_signal.py
index 1b799422bfaef404a20784e96ebfb126e8139ad3..24113357b49c4e1f09408decae223567deac6cb5 100644
--- a/benchmarks/LOWQ2/signal_training/train_signal.py
+++ b/benchmarks/LOWQ2/signal_training/train_signal.py
@@ -13,9 +13,9 @@ from model_ad import VAE
 from model_ad import Generator
 from model_ad import LatentSpace
 
-epochs = 400
+epochs = 200
 batch_size = 5000
-model_path = 'model_digitization'
+model_path = 'model_digitization_genprop'
 data_grid_size = 6
 
 condition_columns = ['x', 'y', 'px', 'py']
@@ -25,10 +25,10 @@ nconditions = len(condition_columns)
 nInput = nconditions + data_grid_size*data_grid_size*2
 
 # Load data from the ROOT file
-file_path = 'output/Out_Convert_Big.root'
+file_path = 'output/Out_Convert_genprop.root'
 
 #vae = create_model()
-vae = VAE(latent_dim=20,nconditions=nconditions,grid_size=data_grid_size)
+vae = VAE(latent_dim=10,nconditions=nconditions,grid_size=data_grid_size)
 
 #vae.compile(optimizer=Adam())
 vae.compile(r_optimizer=Adam(),a_optimizer=Adam())
@@ -39,53 +39,14 @@ with uproot.open(file_path) as file:
 
     # Extracting data from the ROOT file
     #df = tree.arrays(['x', 'y', 'px', 'py', 'pixel_x', 'pixel_y', 'charge', 'time'], library='pd')
-    df = tree.arrays(['x', 'y', 'px', 'py', 'charge', 'time'], entry_stop=9000000)
-
-    #limit the number of rows
-    #df = df.head(10000)
-    
-
-    # Normalize the 'x', 'y', 'px', and 'py' columns
-    #df['x']  = (df['x'] - df['x'].min()) / (df['x'].max() - df['x'].min())
-    #df['y']  = (df['y'] - df['y'].min()) / (df['y'].max() - df['y'].min())
-    #df['px'] = (df['px'] - df['px'].min()) / (df['px'].max() - df['px'].min())
-    #df['py'] = (df['py'] - df['py'].min()) / (df['py'].max() - df['py'].min())
-
-    # # Define a function to create a sparse tensor from a row
-    # def row_to_sparse_tensor(row):
-    #     charge_indices = np.column_stack([row['pixel_x'], row['pixel_y'], np.zeros(len(row['pixel_x']))])
-    #     time_indices = np.column_stack([row['pixel_x'], row['pixel_y'], np.ones(len(row['pixel_x']))])
-    #     indices = np.concatenate([charge_indices, time_indices])
-    #     values = np.concatenate([row['charge'], row['time']])
-    #     dense_shape = [data_grid_size, data_grid_size, 2]
-    #     sparse_tensor = tf.sparse.reorder(tf.sparse.SparseTensor(indices, values, dense_shape))
-    #     return tf.sparse.to_dense(sparse_tensor)
+    df = tree.arrays(['x', 'y', 'px', 'py', 'charge', 'time'], entry_stop=1000000)
     
-    # # Define a function to create a 2D histogram from a row
-    # def row_to_histogram(row):
-    #     charge_hist, _, _ = np.histogram2d(row['pixel_x'], row['pixel_y'], bins=data_grid_size, weights=row['charge'])
-    #     time_hist, _, _ = np.histogram2d(row['pixel_x'], row['pixel_y'], bins=data_grid_size, weights=row['time'])
-    #     return np.stack([charge_hist, time_hist], axis=-1)
-
-    # # Apply the function to each row of the DataFrame
     target_tensors = np.concatenate([df['charge'].to_numpy(), df['time'].to_numpy()], axis=1)
-    
-    # Reshape the target_tensors so that other than the event dimension, the other dimensions are the flat
-    #target_tensors = target_tensors.reshape((target_tensors.shape[0], -1))
-
-    # # Apply the function to each row of the DataFrame
-    # #target_tensors = df.apply(row_to_sparse_tensor, axis=1)
-    # target_tensors = tf.stack(df.apply(row_to_sparse_tensor, axis=1).to_list())
-
-    # # Reshape the target_tensors so that other than the event dimension, the other dimensions are the flat
-    # target_tensors = tf.reshape(target_tensors, (target_tensors.shape[0], -1)).numpy()
-
+
     # Create input tensors
     conditions_tensors = df[condition_columns].to_numpy()
     conditions_tensors = np.array([list(t) for t in conditions_tensors])
-    #conditions_tensors = df[['x', 'y']].to_numpy()
-    #conditions_tensors = df[[]].to_numpy()
-
+
     # Concatenate the conditions_tensors and target_tensors along final axis
     input_tensors = np.concatenate([conditions_tensors, target_tensors], axis=1)
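
With nconditions = 4 and data_grid_size = 6, each flattened training row has 4 + 36 + 36 = 76 columns in the order [x, y, px, py, charge(36), time(36)]. A minimal sketch with placeholder data documenting the layout the encoder's InputLayer expects:

    import numpy as np

    # Hedged sketch of the flattened row layout fed to the VAE in train_signal.py:
    # [x, y, px, py] + 36 charge values + 36 time values = 76 columns.
    # Placeholder data only; column counts follow this script's settings.
    nconditions, grid = 4, 6
    conditions = np.zeros((10, nconditions), dtype=np.float32)
    charge     = np.zeros((10, grid * grid), dtype=np.float32)
    times      = np.zeros((10, grid * grid), dtype=np.float32)

    rows = np.concatenate([conditions, charge, times], axis=1)
    assert rows.shape[1] == nconditions + 2 * grid * grid     # 76

    # Splitting a row back into its parts:
    cond, chg, tme = np.split(rows, [nconditions, nconditions + grid * grid], axis=1)
    print(cond.shape, chg.shape, tme.shape)   # (10, 4) (10, 36) (10, 36)
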