Diffusion-Reaction
Introduction
This tutorial provides a step-by-step guide for implementing a diffusion-reaction problem with the Finite Element Method (FEM) in the deal.II library. The example solves for three concentration fields (C1, C2, C3): C2 diffuses anisotropically and reacts with C1 to produce C3, the linear system for C2 is solved in parallel with PETSc, and the mesh is adaptively refined at every time step. We focus on setting up the weak form of the equations, assembling the finite element system, applying appropriate boundary conditions, and solving the resulting linear system. The tutorial assumes familiarity with basic FEM concepts and provides a practical foundation for more advanced developments within the deal.II framework.
Theory
TBA
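Pending the full write-up, here is a brief sketch of the model that the complete code below implements (the notation is mine, inferred from the assembly and reaction routines). Species C2 diffuses anisotropically and is consumed by a reaction with C1 that produces C3:

$$\frac{\partial C_2}{\partial t} = \nabla \cdot (\mathbf{D}\, \nabla C_2) - K_{12}\,(C_1 - C_1^{\mathrm{eq}})_+\, C_2, \qquad \mathbf{D} = \mathrm{diag}(D_x, D_y, D_z),$$

$$\frac{\partial C_1}{\partial t} = - K_{12}\,(C_1 - C_1^{\mathrm{eq}})_+\, C_2, \qquad \frac{\partial C_3}{\partial t} = + K_{12}\,(C_1 - C_1^{\mathrm{eq}})_+\, C_2,$$

where $(x)_+ = \max(x, 0)$. Time is discretized with backward Euler for C2 (the reaction coefficient is lagged to the previous step) and forward Euler for C1 and C3. The weak form assembled for $C_2^{n+1}$ therefore reads

$$\int_\Omega \Big(\tfrac{1}{\Delta t} + K_{12}\,(C_1^{n} - C_1^{\mathrm{eq}})_+\Big)\, C_2^{n+1}\, v \,dx \;+\; \int_\Omega \nabla v \cdot \mathbf{D}\, \nabla C_2^{n+1} \,dx \;=\; \int_\Omega \frac{C_2^{n}}{\Delta t}\, v \,dx \qquad \forall v,$$

with $C_2 = C_{2,\mathrm{bc}}$ prescribed on the faces $x=0$ and $y=0$ (and $z=0$ in 3D) and homogeneous Neumann conditions on the remaining boundary.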
Implementation
Full code breakdown TBA
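Until the full breakdown is written, the structure of a single time step, condensed from run() and assemble() in the complete code below, is the following (a sketch only; identifiers are those used in the source, and the ghost-value updates are omitted for brevity):

// One time step of the splitting scheme:
assemble();                  // build (M/dt + R(C1_old) + K_D) and the RHS M*C2_old/dt
solve_linear_problem();      // CG + block-Jacobi solve for the new C2
local_solution_C2     = distributed_solution_C2;  // ghosted copy for output
local_solution_old_C2 = distributed_solution_C2;  // becomes C2_old of the next step
evaluate_reactions();        // pointwise explicit update of C1 and C3
refine_mesh();               // Kelly estimate on C2, transfer all three fields
output_results(step);        // write .vtu/.pvtu and update solution.pvd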
Results
Results for various Diffusion-Reaction constants TBA
Complete code
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/timer.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/base/quadrature_point_data.h>
#include <deal.II/base/tensor_function.h>
#include <cmath>
#include <fstream>
#include <iostream>
#include <random>
namespace diff
{
using namespace dealii;
template <int dim>
class Diffusion
{
public:
Diffusion();
void run();
private:
// Mesh generation and refinement
void create_mesh();
void refine_mesh();
// Diffusion problem functions
void set_initial_data();
void setup_constraints (); // Could also apply the BCs here, or at the end of the matrix assembly
void setup();
void assemble();
void solve_linear_problem();
void evaluate_reactions();
// Save the results - concentration fields C1, C2, C3
void output_results(const unsigned int load_step) const;
// Parallel setup and print on screen
MPI_Comm mpi_communicator;
const unsigned int n_mpi_processes;
const unsigned int this_mpi_process;
parallel::distributed::Triangulation<dim> triangulation;
// Objects for Diffusion
const FESystem<dim> fe; // FE system: a single scalar Q1 field shared by C1, C2 and C3
DoFHandler<dim> dof_handler; // DoF Handler
IndexSet locally_owned_dofs; // IndexSet - Locally Owned
IndexSet locally_relevant_dofs; // IndexSet - Locally relevant
AffineConstraints<double> constraints; // Affine Constraint
PETScWrappers::MPI::SparseMatrix system_matrix; // diffusion Matrix
PETScWrappers::MPI::Vector system_rhs; // RHS
PETScWrappers::MPI::Vector local_solution_C1; // MPI Split solution
PETScWrappers::MPI::Vector local_solution_old_C1; // MPI Split solution
PETScWrappers::MPI::Vector distributed_solution_C1; // Full Solution
PETScWrappers::MPI::Vector distributed_old_C1; // This is for the refinement
PETScWrappers::MPI::Vector local_solution_C2; // MPI Split solution
PETScWrappers::MPI::Vector local_solution_old_C2; // MPI Split solution
PETScWrappers::MPI::Vector distributed_solution_C2; // Full Solution
PETScWrappers::MPI::Vector distributed_old_C2; // This is for the refinement
PETScWrappers::MPI::Vector local_solution_C3; // MPI Split solution
PETScWrappers::MPI::Vector local_solution_old_C3; // MPI Split solution
PETScWrappers::MPI::Vector distributed_solution_C3; // Full Solution
PETScWrappers::MPI::Vector distributed_old_C3; // This is for the refinement
const QGauss<dim> quadrature_formula; // Quadrature formula
// Domain dimensions
double L = 1;
double H = 1;
double W = 1;
const unsigned int nx = 30;
const unsigned int ny = 30;
const unsigned int nz = 30;
// Diffusion coefficients [m^2/s]
double D_x = 5e-2;
double D_y = 1e-2;
double D_z = 5e-3;
// Reaction coefficient [m^3/(mol s)]
double K_12 = 10;
// Initial concentrations [mol/m^3]
double C1_0 = 1;
double C2_0 = 0;
double C3_0 = 0;
// Boundary/Equilibrium concentration [mol/m^3]
double C1_eq = 0.2;
double C2_bc = 1;
// Time stepping [s]
double dt = 0.1;
double T_tot = 10;
const unsigned int n_steps = static_cast<unsigned int>(std::round(T_tot / dt));
ConditionalOStream pcout;
TimerOutput computing_timer;
}; // End of Diffusion class
// Constructor
template <int dim>
Diffusion<dim>::Diffusion()
: mpi_communicator(MPI_COMM_WORLD)
, n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
, this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
, triangulation (mpi_communicator)
, fe (FE_Q<dim> (1), 1)
, dof_handler (triangulation)
, quadrature_formula (fe.degree + 1)
, pcout (std::cout, this_mpi_process == 0)
, computing_timer (mpi_communicator, pcout, TimerOutput::never, TimerOutput::wall_times)
{}
// -----------------------------
// ----------- MESH ------------
// -----------------------------
template <int dim>
void Diffusion<dim>::create_mesh()
{
// 1) Choose the number of subdivisions depending on dim
std::vector<unsigned int> repetitions;
if constexpr (dim == 2)
repetitions = {nx, ny};
else // dim == 3
repetitions = {nx, ny, nz};
// 2) Create the mesh
Point<dim> p1, p2;
if constexpr (dim == 2)
{
p1 = Point<dim>(0, 0);
p2 = Point<dim>(L, H);
}
else
{
p1 = Point<dim>(0, 0, 0);
p2 = Point<dim>(L, H, W);
}
GridGenerator::subdivided_hyper_rectangle(triangulation,
repetitions,
p1, p2);
// Set face boundary IDs
for (const auto &cell : triangulation.active_cell_iterators ())
{
for (const auto &face : cell->face_iterators ())
{
if (face->at_boundary ())
{
const auto center = face->center ();
if (std::fabs (center(0) - 0) < 1e-12)
face->set_boundary_id (0);
else if (std::fabs (center(0) - L) < 1e-12)
face->set_boundary_id (1);
else if (std::fabs (center(1) - 0) < 1e-12)
face->set_boundary_id (2);
else if (std::fabs (center(1) - H) < 1e-12)
face->set_boundary_id (3);
// Only access center(2) in 3D
if constexpr (dim == 3)
{
if (std::fabs(center(2) - 0) < 1e-12)
face->set_boundary_id(4);
else if (std::fabs(center(2) - W) < 1e-12)
face->set_boundary_id(5);
}
}
}
}
// Initialize the DoF handler and extract the locally owned/relevant DoFs
dof_handler.distribute_dofs (fe);
locally_owned_dofs = dof_handler.locally_owned_dofs ();
locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs (dof_handler);
// Initialize the old solution for updates purposes
local_solution_old_C1.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_old_C2.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_old_C3.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
// Initialize the old solution for refinement purposes
distributed_old_C1.reinit (locally_owned_dofs, mpi_communicator);
distributed_old_C2.reinit (locally_owned_dofs, mpi_communicator);
distributed_old_C3.reinit (locally_owned_dofs, mpi_communicator);
pcout << "No. of levels in triangulation: " << triangulation.n_global_levels () << std::endl;
pcout << " Number of locally owned cells on the process: " << triangulation.n_locally_owned_active_cells () << std::endl;
pcout << "Number of global cells:" << triangulation.n_global_active_cells () << std::endl;
pcout << " Total Number of globally active cells: " << triangulation.n_global_active_cells () << std::endl
<< " Number of degrees of freedom for Diffusion: " << dof_handler.n_dofs () << std::endl;
}
template <int dim>
void Diffusion<dim>::refine_mesh()
{
// 1) Estimate the error on C2
// (one entry per active cell of the local triangulation; ghost/artificial entries stay zero)
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(
dof_handler,
QGauss<dim-1>(fe.degree + 1),
std::map<types::boundary_id, const Function<dim>*>(),
local_solution_C2,
estimated_error_per_cell);
// 2) Prepare the solution transfer objects (they need ghosted vectors!)
parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector> soltransC1(dof_handler);
parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector> soltransC2(dof_handler);
parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector> soltransC3(dof_handler);
// 3) Flag cells: refine the cells that together account for 25% of the total estimated error, coarsen those that account for the bottom 10%
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
triangulation, estimated_error_per_cell,
0.25, 0.1);
if (triangulation.n_global_levels() >= 4)
{
for (const auto &cell : triangulation.active_cell_iterators_on_level(3))
if (cell->is_locally_owned())
cell->clear_refine_flag();
}
// 4) Execute coarsening and refinement
triangulation.prepare_coarsening_and_refinement();
soltransC1.prepare_for_coarsening_and_refinement (local_solution_C1);
soltransC2.prepare_for_coarsening_and_refinement (local_solution_C2);
soltransC3.prepare_for_coarsening_and_refinement (local_solution_C3);
triangulation.execute_coarsening_and_refinement();
// 5) Redistribute the DoFs
dof_handler.distribute_dofs(fe);
locally_owned_dofs = dof_handler.locally_owned_dofs();
locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
// 6) Reinitialize the distributed vectors on the new mesh
distributed_old_C1.reinit(locally_owned_dofs, mpi_communicator);
distributed_old_C2.reinit(locally_owned_dofs, mpi_communicator);
distributed_old_C3.reinit(locally_owned_dofs, mpi_communicator);
// 7) Interpolate the old solutions onto the new mesh
soltransC1.interpolate (distributed_old_C1);
soltransC2.interpolate (distributed_old_C2);
soltransC3.interpolate (distributed_old_C3);
// 8) Re-apply the constraints
setup();
constraints.distribute(distributed_old_C1);
constraints.distribute(distributed_old_C2);
constraints.distribute(distributed_old_C3);
// 9) Update the ghosted vectors as well
local_solution_old_C1.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_old_C2.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_old_C3.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_C1 = distributed_old_C1; // For output
local_solution_C2 = distributed_old_C2;
local_solution_C3 = distributed_old_C3;
local_solution_old_C1 = distributed_old_C1; // To read the old solution at the next step
local_solution_old_C2 = distributed_old_C2;
local_solution_old_C3 = distributed_old_C3;
}
// -----------------------------
// -------- Diffusion ---------
// -----------------------------
template<int dim>
void Diffusion<dim>::setup_constraints ()
{
constraints.clear();
constraints.reinit(locally_relevant_dofs);
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
constraints.close();
}
template<int dim>
void Diffusion<dim>::setup()
{
TimerOutput::Scope ts(computing_timer, "setup_system");
// Initialize the dofs contained locally as well as those of the near cells
locally_owned_dofs = dof_handler.locally_owned_dofs ();
locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs (dof_handler);
local_solution_C1.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_C2.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
local_solution_C3.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
// Initialize the RHS and the distributed (non-ghosted) solution vectors
system_rhs.reinit (locally_owned_dofs, mpi_communicator);
distributed_solution_C1.reinit (locally_owned_dofs, mpi_communicator);
distributed_solution_C2.reinit (locally_owned_dofs, mpi_communicator);
distributed_solution_C3.reinit (locally_owned_dofs, mpi_communicator);
setup_constraints (); // Initialize the hanging-node constraints here (and, optionally, the BCs)
// Sparsity pattern initialization (where matrix has nonzero entries)
DynamicSparsityPattern dsp(locally_relevant_dofs);
DoFTools::make_sparsity_pattern (dof_handler,
dsp,
constraints,
false);
SparsityTools::distribute_sparsity_pattern (dsp,
dof_handler.locally_owned_dofs (),
mpi_communicator,
locally_relevant_dofs);
// Diffusion Matrix
system_matrix.reinit (locally_owned_dofs,
locally_owned_dofs,
dsp,
mpi_communicator);
}
template <int dim>
void Diffusion<dim>::set_initial_data()
{
TimerOutput::Scope ts(computing_timer, "set_initial_data");
// 1) Initialize the global vectors with the constant initial values
distributed_solution_C1 = C1_0;
distributed_solution_C2 = C2_0;
distributed_solution_C3 = C3_0;
// 2) Finalize the insertions on the global vectors
distributed_solution_C1.compress(VectorOperation::insert);
distributed_solution_C2.compress(VectorOperation::insert);
distributed_solution_C3.compress(VectorOperation::insert);
// 3) Build and distribute the constraints (hanging nodes; the Dirichlet values for C2 are imposed later, during assembly)
setup_constraints();
constraints.distribute(distributed_solution_C1);
constraints.distribute(distributed_solution_C2);
constraints.distribute(distributed_solution_C3);
// 4) Copy into the local (ghosted) vectors and into the "old" vectors
local_solution_C1 = distributed_solution_C1;
local_solution_C2 = distributed_solution_C2;
local_solution_C3 = distributed_solution_C3;
local_solution_old_C1 = distributed_solution_C1;
local_solution_old_C2 = distributed_solution_C2;
local_solution_old_C3 = distributed_solution_C3;
// 5) Update the ghost values of the local vectors
local_solution_C1.update_ghost_values();
local_solution_C2.update_ghost_values();
local_solution_C3.update_ghost_values();
local_solution_old_C1.update_ghost_values();
local_solution_old_C2.update_ghost_values();
local_solution_old_C3.update_ghost_values();
}
template<int dim>
void Diffusion<dim>::assemble()
{
TimerOutput::Scope ts (computing_timer, "assembly");
// Reset the system matrix and right-hand side
system_rhs = 0;
system_matrix = 0;
// Initialize the FEValues Objects
FEValues<dim> fe_values (fe, quadrature_formula,
update_values | update_gradients |
update_quadrature_points | update_JxW_values);
// Number of DoFs and Gauss points per cell
const unsigned int n_q_points = quadrature_formula.size ();
const unsigned int dofs_per_cell = fe.n_dofs_per_cell ();
// Local Matrices and Vector
FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
Vector<double> cell_rhs (dofs_per_cell);
// Old-solution values of C1 and C2 at the quadrature points
std::vector<double> sol_C1(n_q_points);
std::vector<double> sol_C2(n_q_points);
// Create vector to store the local dof indices
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
// Create the diffusion tensor
SymmetricTensor<2, dim> diffusion_tensor;
// Set values on the diagonal based on the dimension
if constexpr (dim == 2)
{
diffusion_tensor[0][0] = D_x; // component xx
diffusion_tensor[1][1] = D_y; // component yy
}
else if constexpr (dim == 3)
{
diffusion_tensor[0][0] = D_x; // component xx
diffusion_tensor[1][1] = D_y; // component yy
diffusion_tensor[2][2] = D_z; // component zz
}
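// The loop below assembles, for each locally owned cell, the backward-Euler
// system for C2:
//   (M/dt + R(C1_old) + K_D) C2_new = M C2_old / dt,
// where M is the mass matrix, R collects the lagged reaction coefficient
// K_12 * max(C1_old - C1_eq, 0), and K_D is the anisotropic diffusion matrix.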
// loop over the cells
for (const auto &cell : dof_handler.active_cell_iterators ())
{
if (cell->is_locally_owned ())
{
// Reset the local matrix/rhs of the cell
cell_matrix = 0;
cell_rhs = 0;
fe_values.reinit (cell);
fe_values.get_function_values(local_solution_old_C1, sol_C1);
fe_values.get_function_values(local_solution_old_C2, sol_C2);
// loop over the Gauss points of the cell
for (const unsigned int q_point : fe_values.quadrature_point_indices ())
{
double C1_conc = std::max(sol_C1[q_point] - C1_eq, 0.0);
double react = C1_conc * K_12;
for (const unsigned int i : fe_values.dof_indices ())
{
for (const unsigned int j : fe_values.dof_indices ())
{
cell_matrix(i, j) += ((1.0 / dt + react) *
fe_values.shape_value(i, q_point) *
fe_values.shape_value(j, q_point)
+
(diffusion_tensor * fe_values.shape_grad(i, q_point)) *
fe_values.shape_grad(j, q_point)
) * fe_values.JxW(q_point);
}
cell_rhs(i) += fe_values.shape_value(i, q_point) * (sol_C2[q_point] / dt) * fe_values.JxW(q_point);
}
}
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix,
cell_rhs,
local_dof_indices,
system_matrix,
system_rhs);
}
}
system_matrix.compress (VectorOperation::add);
system_rhs.compress (VectorOperation::add);
// Impose the Dirichlet values for C2 (C2 = C2_bc) on the x = 0 and y = 0 faces (and z = 0 in 3D) directly on the assembled system
std::map<types::global_dof_index, double> boundary_values;
VectorTools::interpolate_boundary_values(dof_handler,
0,
Functions::ConstantFunction<dim>(C2_bc),
boundary_values);
VectorTools::interpolate_boundary_values(dof_handler,
2,
Functions::ConstantFunction<dim>(C2_bc),
boundary_values);
if constexpr (dim == 3)
{
VectorTools::interpolate_boundary_values(dof_handler,
4,
Functions::ConstantFunction<dim>(C2_bc),
boundary_values);
}
PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
MatrixTools::apply_boundary_values(boundary_values, system_matrix, tmp, system_rhs, false);
distributed_solution_C2 = tmp;
}
template<int dim>
void Diffusion<dim>::solve_linear_problem()
{
TimerOutput::Scope ts (computing_timer, "solve_linear_system");
// Solve with CG and a block-Jacobi preconditioner; the tolerance is relative to the RHS norm
SolverControl solver_control (100000, 1e-12 * system_rhs.l2_norm());
SolverCG<PETScWrappers::MPI::Vector> solver (solver_control);
PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
solver.solve (system_matrix,
distributed_solution_C2,
system_rhs,
preconditioner);
pcout << " Solved in " << solver_control.last_step () << " iterations." << std::endl;
constraints.distribute (distributed_solution_C2);
}
template <int dim>
void Diffusion<dim>::evaluate_reactions()
{
TimerOutput::Scope ts(computing_timer, "evaluate_reactions");
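// Pointwise forward-Euler update of the non-diffusing species at each locally
// owned DoF:
//   C1_new = C1_old - dt * K_12 * max(C1_old - C1_eq, 0) * C2_new
//   C3_new = C3_old + dt * K_12 * max(C1_old - C1_eq, 0) * C2_new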
// 1) Make sure the ghost values of the old C1 and C3 are up to date
local_solution_old_C1.update_ghost_values();
local_solution_old_C3.update_ghost_values();
// 2) Prepare the (non-ghosted) vectors for the results
PETScWrappers::MPI::Vector new_solution_C1(locally_owned_dofs, mpi_communicator);
PETScWrappers::MPI::Vector new_solution_C3(locally_owned_dofs, mpi_communicator);
// 3) Start from C^n, then subtract/add the reaction contribution
new_solution_C1 = local_solution_old_C1;
new_solution_C3 = local_solution_old_C3;
// 4) Loop over the locally owned DoFs
for (auto it = locally_owned_dofs.begin(); it != locally_owned_dofs.end(); ++it)
{
const types::global_dof_index i = *it;
const double c1_old = local_solution_old_C1[i];
const double c2_new = local_solution_C2[i];
const double c3_old = local_solution_old_C3[i];
// reaction rate
const double c1_eff = std::max(c1_old - C1_eq, 0.0);
const double r = K_12 * c1_eff * c2_new;
// explicit forward-Euler update
new_solution_C1[i] = c1_old - dt * r;
new_solution_C3[i] = c3_old + dt * r;
}
// 5) Finalize the insertions on the new vectors
new_solution_C1.compress(VectorOperation::insert);
new_solution_C3.compress(VectorOperation::insert);
// 6) Apply the constraints to C1 and C3
constraints.distribute(new_solution_C1);
constraints.distribute(new_solution_C3);
// 7) Store the result in the global vectors
distributed_solution_C1 = new_solution_C1;
distributed_solution_C3 = new_solution_C3;
}
// -----------------------------
// ---------- OUTPUT -----------
// -----------------------------
// Solution
template <int dim>
void Diffusion<dim>::output_results(const unsigned int load_step) const
{
DataOut<dim> data_out;
// Add the three scalar fields: C1, C2, C3
data_out.add_data_vector(dof_handler, local_solution_C1, "C1");
data_out.add_data_vector(dof_handler, local_solution_C2, "C2");
data_out.add_data_vector(dof_handler, local_solution_C3, "C3");
// Subdomain field (maps each cell to its MPI rank)
Vector<double> subdomain(triangulation.n_active_cells());
for (unsigned int i = 0; i < subdomain.size(); ++i)
subdomain(i) = triangulation.locally_owned_subdomain();
data_out.add_data_vector(subdomain, "subdomain");
data_out.build_patches();
const std::string filename =
data_out.write_vtu_with_pvtu_record("./",
"solution",
load_step,
mpi_communicator);
if (this_mpi_process == 0)
{
static std::vector<std::pair<double, std::string>> times_and_names;
times_and_names.emplace_back(load_step, filename);
std::ofstream pvd_output("./solution.pvd");
DataOutBase::write_pvd_record(pvd_output, times_and_names);
}
}
// -----------------------------
// ----------- RUN -------------
// -----------------------------
template <int dim>
void Diffusion<dim>::run()
{
Timer timer;
timer.start ();
pcout << "Running on " << Utilities::MPI::n_mpi_processes (mpi_communicator) << " MPI rank(s)..." << std::endl;
create_mesh();
setup();
set_initial_data();
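// Each time step: implicit (backward-Euler) solve for C2, explicit reaction
// update for C1 and C3, then adaptive refinement and output.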
for (unsigned int step = 0; step <= n_steps; ++step)
{
const double time = step * dt;
pcout << " --- Time step : " << time << std::endl;
assemble();
solve_linear_problem();
// --- C2: update & backup ---
local_solution_C2 = distributed_solution_C2;
local_solution_C2.update_ghost_values();
local_solution_old_C2 = distributed_solution_C2;
local_solution_old_C2.update_ghost_values();
// --- Reactions for C1 and C3 ---
evaluate_reactions();
// --- C1 & C3: update & backup ---
local_solution_C1 = distributed_solution_C1;
local_solution_C1.update_ghost_values();
local_solution_old_C1 = distributed_solution_C1;
local_solution_old_C1.update_ghost_values();
local_solution_C3 = distributed_solution_C3;
local_solution_C3.update_ghost_values();
local_solution_old_C3 = distributed_solution_C3;
local_solution_old_C3.update_ghost_values();
// ---------- Refinement & Output ----------
refine_mesh(); // this calls setup() and interpolates the solutions onto the new mesh
pcout << " Total New Number of globally active cells: " << triangulation.n_global_active_cells () << std::endl
<< " New Number of degrees of freedom: " << dof_handler.n_dofs () << std::endl;
output_results(step);
computing_timer.print_summary();
computing_timer.reset();
}
timer.stop ();
pcout << "Total run time: " << timer.wall_time () << " seconds." << std::endl;
}
} // End of namespace diff
// -----------------------------
// ---------- MAIN -------------
// -----------------------------
int main (int argc, char *argv[])
{
try
{
using namespace dealii;
using namespace diff;
Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
Diffusion<2> diffusion_problem;
diffusion_problem.run ();
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------" << std::endl;
std::cerr << "Exception on processing: " << std::endl << exc.what ()
<< std::endl << "Aborting!" << std::endl
<< "----------------------------------------------------" << std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------" << std::endl;
std::cerr << "Unknown exception!" << std::endl << "Aborting!" << std::endl
<< "----------------------------------------------------" << std::endl;
return 1;
}
return 0;
}