diff --git a/.gitignore b/.gitignore
index 49b025bd..cb0a0aa5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,8 @@ __pycache__
 .vscode
 dynamicCode
 linux64GccDPInt32Opt
+.editorconfig
+libtorch*
 
 # Compiled Object files
 *.slo
@@ -34,3 +36,7 @@ linux64GccDPInt32Opt
 *.exe
 *.out
 *.app
+
+# Data
+*.csv
+log.*
diff --git a/2022-07/physics-based-dl-team-solution-03-4/Allwmake b/2022-07/physics-based-dl-team-solution-03-4/Allwmake
index 5283d459..1281a7e8 100755
--- a/2022-07/physics-based-dl-team-solution-03-4/Allwmake
+++ b/2022-07/physics-based-dl-team-solution-03-4/Allwmake
@@ -25,5 +25,5 @@ then
     unzip libtorch-cxx11-abi-shared-with-deps-1.7.1+cpu.zip
 fi
 
-wmake $WITH_BEAR all applications
+wmake $WITH_BEAR all application
 
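Note: before wmake runs, the script downloads and unzips the CPU build of libtorch 1.7.1 into the project directory (now ignored via the libtorch* pattern in .gitignore). A minimal smoke test like the sketch below, compiled against the same libtorch headers and libraries that the build points to, can confirm that the unpacked distribution links and that reverse-mode autograd works; the file name and the check itself are illustrative assumptions, not part of the patch.

    // torchSmokeTest.C -- hypothetical standalone check that libtorch links
    // and reverse-mode autograd works.
    #include <torch/torch.h>
    #include <iostream>

    int main()
    {
        // Small tensor with gradient tracking, mirroring how the solver
        // enables autograd on its training inputs.
        torch::Tensor x = torch::ones({3}, torch::requires_grad(true));
        torch::Tensor y = (x * x).sum();
        y.backward();

        // d(sum(x^2))/dx = 2x, so the gradient should print as [2, 2, 2].
        std::cout << x.grad() << "\n";
        return 0;
    }
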
diff --git a/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/pinnPotentialFoam.C b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/pinnPotentialFoam.C
index 8816edca..52541f46 100644
--- a/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/pinnPotentialFoam.C
+++ b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/pinnPotentialFoam.C
@@ -51,6 +51,7 @@ Description
 // libtorch-OpenFOAM data transfer
 #include "torchFunctions.C"
 #include "fileNameGenerator.H"
+#include "torchDifferentialOperators.C"
 
 using namespace Foam;
 using namespace torch::indexing;
@@ -179,10 +180,12 @@ int main(int argc, char *argv[])
         mesh.nCells(),
         torch::TensorOptions().dtype(at::kLong)
     );
+
+
     // - Randomly select 10 % of all cell centers for training.
     long int n_cells = int(0.1 * mesh.nCells());
     torch::Tensor training_indices = shuffled_indices.index({Slice(0, n_cells)});
 
     // - Use 10% of random indices to select the training_data from Phi_tensor
     torch::Tensor O_training = O_tensor.index(training_indices);
     O_training.requires_grad_(true);
@@ -198,7 +201,6 @@ int main(int argc, char *argv[])
     torch::Tensor mse = torch::zeros_like(O_training);
 
 
-
     size_t epoch = 1;
     double min_mse = 1.;
 
@@ -237,73 +239,75 @@ int main(int argc, char *argv[])
         );
         */
         
-        //grad(Ux) = gradient of scalar component Ux w.r.t (x,y,z)
-        auto Ux_predict_grad = torch::autograd::grad(
-           {O_predict.index({Slice(),0})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(O_training.index({Slice(),0}))}, // N_{train} x 1
-           true,
-           true
-        );
+        // grad(Ux) = gradient of scalar component Ux w.r.t (x,y,z)
+        // auto Ux_predict_grad = torch::autograd::grad(
+        //    {O_predict.index({Slice(),0})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(O_training.index({Slice(),0}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
-        //grad(Uy) = gradient of scalar component Uy w.r.t (x,y,z)
-        auto Uy_predict_grad = torch::autograd::grad(
-           {O_predict.index({Slice(),1})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(O_training.index({Slice(),1}))}, // N_{train} x 1
-           true,
-           true
-        );
+        // //grad(Uy) = gradient of scalar component Uy w.r.t (x,y,z)
+        // auto Uy_predict_grad = torch::autograd::grad(
+        //    {O_predict.index({Slice(),1})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(O_training.index({Slice(),1}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
                 
-        //grad(Uz) = gradient of scalar component Uz w.r.t (x,y,z)
-        auto Uz_predict_grad = torch::autograd::grad(
-           {O_predict.index({Slice(),2})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(O_training.index({Slice(),2}))}, // N_{train} x 1
-           true,
-           true
-        );
+        // //grad(Uz) = gradient of scalar component Uz w.r.t (x,y,z)
+        // auto Uz_predict_grad = torch::autograd::grad(
+        //    {O_predict.index({Slice(),2})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(O_training.index({Slice(),2}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
-        auto divU = Ux_predict_grad[0].index({Slice(), 0}) + Uy_predict_grad[0].index({Slice(), 1}) + Uz_predict_grad[0].index({Slice(), 2});
+        const auto divU = Foam::AI::div(O_predict.index({Slice(),0}), O_predict.index({Slice(),1}), O_predict.index({Slice(),2}), cc_training);
+        // auto divU = Ux_predict_grad[0].index({Slice(), 0}) + Uy_predict_grad[0].index({Slice(), 1}) + Uz_predict_grad[0].index({Slice(), 2});
         
         
        // grad(Phi) = gradient of the scalar potential Phi w.r.t. (x, y, z)
-        auto Phi_predict_grad = torch::autograd::grad(
-           {O_predict.index({Slice(),3})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(O_training.index({Slice(),3}))}, // N_{train} x 1
-           true,
-           true
-        );
+        // auto Phi_predict_grad = torch::autograd::grad(
+        //    {O_predict.index({Slice(),3})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(O_training.index({Slice(),3}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
         
-        auto Phi_predict_grad_x_grad = torch::autograd::grad(
-           {Phi_predict_grad[0].index({Slice(),0})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(Phi_predict_grad[0].index({Slice(),0}))}, // N_{train} x 1
-           true,
-           true
-        );
+        // auto Phi_predict_grad_x_grad = torch::autograd::grad(
+        //    {Phi_predict_grad[0].index({Slice(),0})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(Phi_predict_grad[0].index({Slice(),0}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
         
-         auto Phi_predict_grad_y_grad = torch::autograd::grad(
-           {Phi_predict_grad[0].index({Slice(),1})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(Phi_predict_grad[0].index({Slice(),1}))}, // N_{train} x 1
-           true,
-           true
-        );
-        
-         auto Phi_predict_grad_z_grad = torch::autograd::grad(
-           {Phi_predict_grad[0].index({Slice(),2})},//N_{train} x 1
-           {cc_training}, // N_{train} x 3
-           {torch::ones_like(Phi_predict_grad[0].index({Slice(),2}))}, // N_{train} x 1
-           true,
-           true
-        );
+        //  auto Phi_predict_grad_y_grad = torch::autograd::grad(
+        //    {Phi_predict_grad[0].index({Slice(),1})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(Phi_predict_grad[0].index({Slice(),1}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
+        //  auto Phi_predict_grad_z_grad = torch::autograd::grad(
+        //    {Phi_predict_grad[0].index({Slice(),2})},//N_{train} x 1
+        //    {cc_training}, // N_{train} x 3
+        //    {torch::ones_like(Phi_predict_grad[0].index({Slice(),2}))}, // N_{train} x 1
+        //    true,
+        //    true
+        // );
         
-        auto laplacePhi = Phi_predict_grad_x_grad[0].index({Slice(), 0}) + Phi_predict_grad_y_grad[0].index({Slice(), 1}) + Phi_predict_grad_z_grad[0].index({Slice(), 2});
+        const auto laplacePhi = Foam::AI::laplacian(O_predict.index({Slice(),3}), cc_training);
+
+        // auto laplacePhi = Phi_predict_grad_x_grad[0].index({Slice(), 0}) + Phi_predict_grad_y_grad[0].index({Slice(), 1}) + Phi_predict_grad_z_grad[0].index({Slice(), 2});
         // Compute the data mse loss.
         
         // O = [ux, uy, uz, Phi], O_pred = [Ux_nn, Uy_nn, Uz_nn, Phi_nn], Mse_data = Sum(Ux - Ux_nn)^2 / N_mesh + Sum(Uy - Uy_nn)^2 / N_mesh + Sum(Uz - Uz_nn)^2 / N_mesh + Sum(Phi - Phi_nn)^2 / N_mesh
@@ -333,7 +337,7 @@ int main(int argc, char *argv[])
             << "U MSE = " << mse_grad.item<double>() << "\n"
             << "Training MSE = " << mse.item<double>() << "\n";
 
-        std::cout << at::size(Ux_predict_grad[0],0) << at::size(Uy_predict_grad[0],0) << at::size(Uz_predict_grad[0],0) << at::size(Phi_predict_grad[0],0) << "\n";
+        // std::cout << at::size(Ux_predict_grad[0],0) << at::size(Uy_predict_grad[0],0) << at::size(Uz_predict_grad[0],0) << at::size(Phi_predict_grad[0],0) << "\n";
         // Write the hiddenLayers_ network structure as a string-formatted python list.
         
         std::cout << at::size(divU,0) << "\n";
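The per-component torch::autograd::grad calls above are retired in favour of the Foam::AI::div and Foam::AI::laplacian helpers introduced below. For orientation, here is a hedged sketch of how the two PDE residuals could be folded into the training loss; the actual loss assembly in pinnPotentialFoam.C lies outside the shown hunks, so the names mse_pde, mse_data and mse_total are assumptions.

    // Hypothetical loss assembly using the helper-based residuals; for
    // incompressible potential flow both residuals should vanish:
    // div(U) = 0 and laplacian(Phi) = 0.
    const auto divU = Foam::AI::div(
        O_predict.index({Slice(), 0}),
        O_predict.index({Slice(), 1}),
        O_predict.index({Slice(), 2}),
        cc_training
    );
    const auto laplacePhi = Foam::AI::laplacian(
        O_predict.index({Slice(), 3}),
        cc_training
    );

    // Mean-squared PDE residuals (an assumed weighting of 1 for each term).
    auto mse_pde = (divU * divU).mean() + (laplacePhi * laplacePhi).mean();

    // Data loss on the sampled cell centres plus the physics loss.
    auto mse_data = ((O_predict - O_training) * (O_predict - O_training)).mean();
    auto mse_total = mse_data + mse_pde;
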
diff --git a/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.C b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.C
new file mode 100644
index 00000000..5d4f6b9a
--- /dev/null
+++ b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.C
@@ -0,0 +1,127 @@
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     |
+    \\  /    A nd           | www.openfoam.com
+     \\/     M anipulation  |
+-------------------------------------------------------------------------------
+    Copyright (C) 2021 Tomislav Maric, TU Darmstadt
+-------------------------------------------------------------------------------
+License
+    This file is part of OpenFOAM.
+
+    OpenFOAM is free software: you can redistribute it and/or modify it
+    under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
+    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+    for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.
+
+Description
+    Differential operators (div, laplacian) evaluated with libtorch automatic differentiation.
+
+SourceFiles
+    torchDifferentialOperators.C
+
+\*---------------------------------------------------------------------------*/
+
+#include "torchDifferentialOperators.H"
+
+using namespace torch::indexing;
+
+namespace Foam {
+namespace AI {
+torch::Tensor
+div(const torch::Tensor& vel_vec,
+    const torch::Tensor& input)
+{
+
+  const auto u = vel_vec.index({ Slice(), 0 });
+  const auto v = vel_vec.index({ Slice(), 1 });
+  const auto w = vel_vec.index({ Slice(), 2 });
+
+  const auto u_grad = torch::autograd::grad(
+    { u }, { input }, { torch::ones_like(u) }, true, true);
+  const auto v_grad = torch::autograd::grad(
+    { v }, { input }, { torch::ones_like(v) }, true, true);
+  const auto w_grad = torch::autograd::grad(
+    { w }, { input }, { torch::ones_like(w) }, true, true);
+
+  const auto div_vel = u_grad[0].index({ Slice(), 0 }) +
+                 v_grad[0].index({ Slice(), 1 }) +
+                 w_grad[0].index({ Slice(), 2 });
+
+  return div_vel;
+}
+
+torch::Tensor
+div(const torch::Tensor& u,
+    const torch::Tensor& v,
+    const torch::Tensor& w,
+    const torch::Tensor& input)
+{
+
+  const auto u_grad = torch::autograd::grad(
+    { u }, { input }, { torch::ones_like(u) }, true, true);
+  const auto v_grad = torch::autograd::grad(
+    { v }, { input }, { torch::ones_like(v) }, true, true);
+  const auto w_grad = torch::autograd::grad(
+    { w }, { input }, { torch::ones_like(w) }, true, true);
+
+  const auto div_vel = u_grad[0].index({ Slice(), 0 }) +
+                 v_grad[0].index({ Slice(), 1 }) +
+                 w_grad[0].index({ Slice(), 2 });
+
+  return div_vel;
+}
+
+torch::Tensor
+laplacian(const torch::Tensor& var,
+          const torch::Tensor& input)
+{
+
+  const auto var_grad = torch::autograd::grad(
+    { var },
+    { input },
+    { torch::ones_like(var) },
+    true,
+    true);
+
+  // compute second derivatives required for laplacian
+  const auto grad_x_var_grad = torch::autograd::grad(
+    { var_grad[0].index({ Slice(), 0 }) },
+    { input },
+    { torch::ones_like(var_grad[0].index({ Slice(), 0 })) },
+    true,
+    true);
+  const auto grad_y_var_grad = torch::autograd::grad(
+    { var_grad[0].index({ Slice(), 1 }) },
+    { input },
+    { torch::ones_like(var_grad[0].index({ Slice(), 1 })) },
+    true,
+    true);
+  const auto grad_z_var_grad = torch::autograd::grad(
+    { var_grad[0].index({ Slice(), 2 }) },
+    { input },
+    { torch::ones_like(var_grad[0].index({ Slice(), 2 })) },
+    true,
+    true);
+
+  // compute laplacian
+  const auto laplacian_var = grad_x_var_grad[0].index({ Slice(), 0 }) +
+                       grad_y_var_grad[0].index({ Slice(), 1 }) +
+                       grad_z_var_grad[0].index({ Slice(), 2 });
+
+  return laplacian_var;
+}
+
+} // namespace AI
+} // namespace Foam
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
\ No newline at end of file
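Both helpers rely on torch::autograd::grad with retain_graph and create_graph set to true, so the intermediate gradients stay differentiable for the second derivatives needed by laplacian() and for the final backward pass through the training loss. Below is a hedged unit check against simple analytic fields, assuming the helpers are compiled into a small standalone test program with the OpenFOAM and libtorch include paths available; the test itself is not part of the patch.

    // For Phi = x^2 + y^2 + z^2 the Laplacian is exactly 6, and for
    // U = (x, y, z) the divergence is exactly 3, independent of the points.
    #include <torch/torch.h>
    #include <iostream>
    #include "torchDifferentialOperators.H"

    int main()
    {
        using namespace torch::indexing;

        // N x 3 "cell centres" with gradient tracking, as in the solver.
        torch::Tensor x = torch::rand({8, 3});
        x.requires_grad_(true);

        torch::Tensor Phi = (x * x).sum(1);      // x^2 + y^2 + z^2 per point
        torch::Tensor u = x.index({Slice(), 0});
        torch::Tensor v = x.index({Slice(), 1});
        torch::Tensor w = x.index({Slice(), 2});

        std::cout << Foam::AI::laplacian(Phi, x) << "\n"; // expect 6 everywhere
        std::cout << Foam::AI::div(u, v, w, x) << "\n";   // expect 3 everywhere
        return 0;
    }
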
diff --git a/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.H b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.H
new file mode 100644
index 00000000..0e34cef6
--- /dev/null
+++ b/2022-07/physics-based-dl-team-solution-03-4/application/pinnPotentialFoam/torchDifferentialOperators.H
@@ -0,0 +1,83 @@
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     |
+    \\  /    A nd           | www.openfoam.com
+     \\/     M anipulation  |
+-------------------------------------------------------------------------------
+    Copyright (C) 2021 Tomislav Maric, TU Darmstadt
+-------------------------------------------------------------------------------
+License
+    This file is part of OpenFOAM.
+
+    OpenFOAM is free software: you can redistribute it and/or modify it
+    under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
+    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+    for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.
+
+Description
+    Differential operators (div, laplacian) evaluated with libtorch automatic differentiation.
+
+SourceFiles
+    torchDifferentialOperators.C
+
+\*---------------------------------------------------------------------------*/
+
+#ifndef torchDifferentialOperators_H
+#define torchDifferentialOperators_H
+
+#include <torch/torch.h>
+
+#include "vector.H"
+
+namespace Foam {
+namespace AI {
+
+/** \brief Compute the divergence of a vector field.
+ *  \param[in] vel_vec The velocity vector field.
+ *  \param[in] input The input tensor (e.g. cell centres) the derivatives are taken with respect to.
+ *  \return The divergence of vel_vec with respect to input.
+ */
+torch::Tensor
+div(const torch::Tensor& vel_vec,
+    const torch::Tensor& input);
+
+/** \brief Compute the divergence of a vector field given as three scalar fields (u, v, w).
+ *  \param[in] u The x-component of the vector field.
+ *  \param[in] v The y-component of the vector field.
+ *  \param[in] w The z-component of the vector field.
+ *  \param[in] input The input tensor the derivatives are taken with respect to.
+ *  \return The divergence of (u, v, w) with respect to input.
+ * \overload
+ */
+torch::Tensor
+div(const torch::Tensor& u,
+    const torch::Tensor& v,
+    const torch::Tensor& w,
+    const torch::Tensor& input);
+
+/** \brief Compute the laplacian of a scalar field.
+ *  \param[in] var The scalar field.
+ *  \param[in] input The input tensor the derivatives are taken with respect to.
+ *  \return The Laplacian of var with respect to input.
+ */
+torch::Tensor
+laplacian(const torch::Tensor& var,
+          const torch::Tensor& input);
+
+} // namespace AI
+} // namespace Foam
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+
+#endif
+
+// ************************************************************************* //
diff --git a/2022-07/physics-based-dl-team-solution-03-4/run/Cylinder/system/fvSolution b/2022-07/physics-based-dl-team-solution-03-4/run/Cylinder/system/fvSolution
index cf96e554..488eeee9 100644
--- a/2022-07/physics-based-dl-team-solution-03-4/run/Cylinder/system/fvSolution
+++ b/2022-07/physics-based-dl-team-solution-03-4/run/Cylinder/system/fvSolution
@@ -35,5 +35,14 @@ potentialFlow
     nNonOrthogonalCorrectors 3;
 }
 
+AI
+{
+    // Network and optimizer settings, read when the
+    // solver is parameterized via this dictionary.
+    maxIterations 5000;
+    hiddenLayers (30 30 30 30 30);
+    optimizerStep 1e-04;
+}
+
 
 // ************************************************************************* //
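The new AI sub-dictionary in fvSolution groups the network and optimizer settings with the other solution controls. Below is a hedged sketch of how these entries could be read inside pinnPotentialFoam.C through the standard OpenFOAM dictionary interface; the patch only adds the entries, so this lookup code and its default values are assumptions about how they would be consumed.

    // Hypothetical read of the AI sub-dictionary (inside the solver, where
    // "using namespace Foam" is active and mesh is the fvMesh).
    const dictionary& aiDict = mesh.solutionDict().subDict("AI");

    const label maxIterations =
        aiDict.lookupOrDefault<label>("maxIterations", 5000);

    const scalar optimizerStep =
        aiDict.lookupOrDefault<scalar>("optimizerStep", 1e-04);

    // hiddenLayers (30 30 30 30 30) reads naturally as a labelList.
    const labelList hiddenLayers =
        aiDict.lookupOrDefault<labelList>("hiddenLayers", labelList());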