...
 
Commits (51)
......@@ -2,7 +2,7 @@ image: gcc:7
before_script:
- apt update
- apt -y install cmake libconfig++-dev libfftw3-dev libnetcdf-dev libcurl4-openssl-dev libopenmpi-dev doxygen openmpi-bin
- apt -y install cmake libconfig++-dev libfftw3-dev libnetcdf-dev libcurl4-openssl-dev libopenmpi-dev openmpi-bin
build:
stage: build
......@@ -13,9 +13,12 @@ build:
- make VERBOSE=yes
artifacts:
paths:
- build/src/stemsalabim
- build/src/libstemsalabim_lib.so
- build/tests/stemsalabim_test
- build/src/stemsalabim
- build/tests/ssb-test
- build/src/ssb-mkin
- build/src/ssb-chk
- build/src/ssb-run
expire_in: 2h
cache:
paths:
......
What's new
==========
STEMsalabim 5.0.0
-----------------
February 28th, 2019
**IMPORTANT**
The parameters `application.verbose` and `simulation.skip_simulation` are deprecated now.
The groups `adf/adf_intensities`, `cbed/cbed_intensities`, and `adf/center_of_mass` now have
a dimension for energy loss. It is usually `1` unless plasmon scattering feature is used.
Highlights
^^^^^^^^^^
- Speed improvements by increasing the grid sizes to match efficient FFT sizes. Note, that this may result
in a higher simulation grid density than specified in `grating.density` parameter!
- Alternative parallelization scheme, see :ref:`parallelization-scheme`. When appropriate, different MPI procs
now calculate different frozen phonon configurations / defoci in parallel. This reduces the required amount
of communication between the processors.
- Automatic calculation of `center of mass` of the CBEDs for all ADF points. The COMs are calculated when
`adf.enabled = true` and stored in the NC file next to `adf/adf_intensities` in `adf/center_of_mass`. Unit is mrad.
- New executables `ssb-mkin` and `ssb-run`. The former prepares an **input** NC file from which the latter can run
the simulation. This has multiple advantages. See :ref:`simulation-structure` for more information.
- Single plasmon scattering.
Other changes
^^^^^^^^^^^^^
- Removed `application.verbose` parameter.
- Removed `simulation.skip_simulation`.
- Ability to disable thermal displacements via `frozen_phonon.enable = false` parameter.
- Fixed a serious bug with the integrated defocus averaging.
- Input XYZ files can now contain more than one space or TAB character for column separation.
- Removed Doxygen documentation and doc string comments.
- Default FFTW planning is now `FFTW_MEASURE`. This improves startup times of the simulation slightly.
- Changed the chunking of the `adf/adf_intensities` and `cbed/cbed_intensities` variables for faster write speed.
- Added `AMBER/slice_coordinates` variable to the output file, that contains the `z` coordinate of the upper boundary
of each slice in nm.
- Removed HTTP reporting and CURL dependency.
- Significant code refactoring and some minor bugs fixed.
- Improved documentation.
STEMsalabim 4.0.1, 4.0.2
------------------------
......
......@@ -7,9 +7,9 @@
# version, package name and cmake version
# when you change logic here, don't forget to change the stuff in Sphinx conf.py!!
set(PACKAGE_VERSION_MAJOR "4")
set(PACKAGE_VERSION_MAJOR "5")
set(PACKAGE_VERSION_MINOR "0")
set(PACKAGE_VERSION_PATCH "2")
set(PACKAGE_VERSION_PATCH "0")
set(PACKAGE_NAME "STEMsalabim")
set(PACKAGE_DESCRIPTION "A high-performance computing cluster friendly code for scanning transmission electron microscopy image simulations of thin specimens")
set(PACKAGE_AUTHOR "Jan Oliver Oelerich")
......@@ -18,7 +18,9 @@ set(PACKAGE_AUTHOR_EMAIL "[email protected]")
project(STEMsalabim CXX)
cmake_minimum_required(VERSION 3.3)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
string(TIMESTAMP DATE "%Y-%m-%dT%H:%M:%S")
......@@ -68,19 +70,16 @@ find_package(LibConfig REQUIRED)
include_directories(${LIBCONFIG_INCLUDE_DIR})
set(LIBS ${LIBS} ${LIBCONFIG_LIBRARIES})
# look for GSL
find_package(GSL REQUIRED)
include_directories(${GSL_INCLUDE_DIRS})
set(LIBS ${LIBS} ${GSL_LIBRARIES})
# look for NetCDF
find_package(NetCDF REQUIRED)
include_directories(${NETCDF_INCLUDE_DIR})
set(LIBS ${LIBS} ${NETCDF_LIBRARIES})
# look for CURL
find_package(CURL)
if(CURL_FOUND)
include_directories(${CURL_INCLUDE_DIRS})
set(LIBS ${LIBS} ${CURL_LIBRARIES})
set(HAVE_CURL 1)
endif(CURL_FOUND)
# MPI
find_package(MPI REQUIRED)
include_directories(${MPI_INCLUDE_PATH})
......
# Tries to find Gperftools.
#
# Usage of this module as follows:
#
#     find_package(Gperftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
#  Gperftools_ROOT_DIR  Set this variable to the root installation of
#                       Gperftools if the module has problems finding
#                       the proper installation path.
#
# Variables defined by this module:
#
#  GPERFTOOLS_FOUND        System has Gperftools libs/headers
#  GPERFTOOLS_LIBRARIES    The Gperftools libraries (tcmalloc & profiler)
#  GPERFTOOLS_INCLUDE_DIR  The location of Gperftools headers

# Locate the individual libraries as well as the combined build.
find_library(GPERFTOOLS_TCMALLOC
    NAMES tcmalloc
    HINTS ${Gperftools_ROOT_DIR}/lib)

find_library(GPERFTOOLS_PROFILER
    NAMES profiler
    HINTS ${Gperftools_ROOT_DIR}/lib)

find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
    NAMES tcmalloc_and_profiler
    HINTS ${Gperftools_ROOT_DIR}/lib)

find_path(GPERFTOOLS_INCLUDE_DIR
    NAMES gperftools/heap-profiler.h
    HINTS ${Gperftools_ROOT_DIR}/include)

# Prefer the combined tcmalloc_and_profiler library. Fall back to linking the
# two separate libraries when only those are available — some distributions do
# not ship the combined build, and the original module reported NOT FOUND in
# that case even though both components were present.
if(GPERFTOOLS_TCMALLOC_AND_PROFILER)
    set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
elseif(GPERFTOOLS_TCMALLOC AND GPERFTOOLS_PROFILER)
    set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC} ${GPERFTOOLS_PROFILER})
endif()

include(FindPackageHandleStandardArgs)
# Sets GPERFTOOLS_FOUND and prints the standard found/not-found message.
find_package_handle_standard_args(
    Gperftools
    DEFAULT_MSG
    GPERFTOOLS_LIBRARIES
    GPERFTOOLS_INCLUDE_DIR)

mark_as_advanced(
    Gperftools_ROOT_DIR
    GPERFTOOLS_TCMALLOC
    GPERFTOOLS_PROFILER
    GPERFTOOLS_TCMALLOC_AND_PROFILER
    GPERFTOOLS_LIBRARIES
    GPERFTOOLS_INCLUDE_DIR)
......@@ -23,7 +23,7 @@ IF( LIBCONFIG_ROOT )
FIND_LIBRARY(
LIBCONFIG_LIBRARIES
NAMES "config++"
NAMES "config++" "libconfig++"
PATHS ${LIBCONFIG_ROOT}
PATH_SUFFIXES "lib" "lib64"
NO_DEFAULT_PATH
......@@ -41,7 +41,7 @@ ELSE( LIBCONFIG_ROOT )
FIND_LIBRARY(
LIBCONFIG_LIBRARIES
NAMES "config++"
NAMES "config++" "libconfig++"
PATHS ${PKG_LIBCONFIG_LIBRARY_DIRS} ${INCLUDE_INSTALL_DIR}
)
......
......@@ -18,8 +18,6 @@ include(FindPackageHandleStandardArgs)
if((NOT MKL_ROOT) AND (DEFINED ENV{MKLROOT}))
set(MKL_ROOT $ENV{MKLROOT} CACHE PATH "Folder contains MKL")
else()
message( FATAL_ERROR "MKL not found! Specify MKL_ROOT!" )
endif()
if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")
......
# add a target to generate API documentation with Doxygen
find_package(Doxygen)
set(doxyfile_in ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
set(doxyfile ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${doxyfile_in} ${doxyfile} @ONLY)
add_custom_target(docs-source
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen")
add_custom_target(docs-manual
add_custom_target(docs
COMMAND sphinx-build -c ${CMAKE_CURRENT_SOURCE_DIR} -b html ${PROJECT_SOURCE_DIR}/docs manual
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Build html documentation"
VERBATIM)
add_custom_target(docs-manual-tex
COMMAND sphinx-build -c ${CMAKE_CURRENT_SOURCE_DIR} -b latex ${PROJECT_SOURCE_DIR}/docs manual-tex
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Build latex documentation"
VERBATIM)
\ No newline at end of file
This source diff could not be displayed because it is too large. You can view the blob instead.
/* Custom styling for the Doxygen-generated API documentation.
   Overrides the default Doxygen theme: fixed-width centered layout and a
   flat look using the #5373B4 accent color instead of background images. */

/* Center the whole page at a fixed width. */
#wrap {
width:1170px;
margin:0 auto;
position:relative;
}
#titlearea {
padding-bottom:20px;
}
/* Search box position within the title area. */
#MSearchBox {
top:10px;
}
/* Replace the draggable nav-tree splitter image with a thin solid line
   and disable the resize cursor. */
.ui-resizable-handle.ui-resizable-e {
background-image: none;
background-color: #5373B4;
cursor: default;
width:1px;
}
#side-nav {
padding-right:0px;
}
/* Hide the tree-view sync toggle button. */
#nav-sync {
display:none;
}
/* Drop the default gradient background images from the nav tree and header. */
#nav-tree, .header {
background-image:none !important;
}
/* Flat accent color for the selected nav-tree entry. */
#nav-tree .selected {
background-image:none;
background-color: #5373B4;
text-shadow: none;
}
/* Breadcrumb bar: flat light background with a single accent top border. */
.navpath ul {
background-image:none;
background-color: #F9FAFC;
border: none;
border-top:1px solid #5373B4;
padding: 10px 0 10px 0;
}
#nav-path li {
background-image:none;
}
<!-- HTML footer for doxygen 1.8.13-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li>
<div id="version">
Documentation Version: $projectnumber
</div>
</li>
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">Doxygen</a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
</div>
<!--END !GENERATE_TREEVIEW-->
<!--CUSTOMJS-->
</body>
</html>
<!-- HTML header for doxygen 1.8.13-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
<script>
$(document).ready(function() {
$(".side-nav-resizable").resizable('disable').removeClass('ui-state-disabled');
});
</script>
</head>
<body>
<div id="wrap">
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<td>$searchbox</td>
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->
File formats
============
A *STEMsalabim* simulation is set-up via **input files** and its results are stored in an **output file**. The file for
A STEMsalabim simulation is set-up via **input files** and its results are stored in an **output file**. The file for
configuring a simulation is described in detail at :ref:`parameter-file`. Here, we describe the format of the **crystal
file**, i.e., the atomic information about the specimen, and the **output file**, in which the results are stored.
......@@ -46,42 +46,264 @@ Below is a very brief, artificial example (without custom slicing): ::
Output file format
------------------
All results of a *STEMsalabim* simulation are written to a binary `NetCDF <https://en.wikipedia.org/wiki/NetCDF>`_ file.
All results of a STEMsalabim simulation are written to a binary `NetCDF <https://en.wikipedia.org/wiki/NetCDF>`_ file.
The NetCDF format is based on the `Hierarchical Data Format <https://en.wikipedia.org/wiki/Hierarchical_Data_Format>`_
and there are libraries to read the data for many programming languages.
The structure of NetCDF files is hierarchical and organized in groups. The following groups are written by
*STEMsalabim*:
The structure of NetCDF files can be inspected using the handy tool ``ncdump -h YOUR_FILE.nc`` (don't forget the ``-h``
parameter, otherwise the whole content of the file is dumped!). Here is the output of an example run: ::
runtime
~~~~~~~
This group contains information about the program and the simulation, such as version, UUID and so on.
netcdf out {
group: AMBER {
dimensions:
atom = 164140 ;
elements = 1 ;
spatial = 3 ;
cell_spatial = 3 ;
cell_angular = 3 ;
label = 6 ;
frame = 10 ;
slices = 142 ;
grid_x = 490 ;
grid_y = 490 ;
variables:
char spatial(spatial) ;
char cell_spatial(cell_spatial) ;
char cell_angular(cell_angular, label) ;
float coordinates(frame, atom, spatial) ;
coordinates:unit = "nanometer" ;
float lattice_coordinates(frame, atom, spatial) ;
float cell_lengths(frame, cell_spatial) ;
cell_lengths:unit = "nanometer" ;
float cell_angles(frame, cell_angular) ;
cell_angles:unit = "degree" ;
float radius(frame, atom) ;
radius:unit = "nanometer" ;
float msd(frame, atom) ;
int slice(frame, atom) ;
float slice_coordinates(slices) ;
short element(frame, atom) ;
float system_lengths(cell_spatial) ;
float system_angles(cell_spatial) ;
char atom_types(elements, label) ;
// group attributes:
:Conventions = "AMBER" ;
:ConventionVersion = "1.0" ;
:program = "STEMsalabim" ;
:programVersion = "5.0.0b" ;
:title = "sim" ;
} // group AMBER
group: runtime {
// group attributes:
:programVersionMajor = "5" ;
:programVersionMinor = "0" ;
:programVersionPatch = "0b" ;
:gitCommit = "f1dcc606c9a78b12fc3afda9496f638992b591bf" ;
:title = "sim" ;
:UUID = "8dce768e-f1d6-4876-bb20-c301e3e323f8" ;
:time_start = "2019-02-12 13:25:43" ;
:time_stop = "2019-02-13 00:06:05" ;
} // group runtime
group: params {
dimensions:
defocus = 1 ;
plasmon_energies = 51 ;
variables:
float defocus(defocus) ;
float defocus_weights(defocus) ;
float plasmon_energies(plasmon_energies) ;
// group attributes:
:program_arguments = "--params=inp.cfg --num-threads=4 --tmp-dir=/local --output-file=out.nc" ;
:config_file_contents = "..." ;
group: application {
// group attributes:
:random_seed = 967613772U ;
} // group application
group: simulation {
// group attributes:
:title = "sim" ;
:normalize_always = 0US ;
:bandwidth_limiting = 1US ;
:output_file = "out.nc" ;
:output_compress = 0US ;
} // group simulation
group: probe {
// group attributes:
:c5 = 5000000. ;
:cs = 2000. ;
:astigmatism_ca = 0. ;
:defocus = -0. ;
:fwhm_defoci = 6. ;
:num_defoci = 1U ;
:astigmatism_angle = 0. ;
:min_apert = 0. ;
:max_apert = 15.07 ;
:beam_energy = 200. ;
:scan_density = 40. ;
} // group probe
group: specimen {
// group attributes:
:max_potential_radius = 0.3 ;
:crystal_file = "Si_110_10x10x200_300K.xyz" ;
} // group specimen
group: grating {
// group attributes:
:density = 90. ;
:nx = 490U ;
:ny = 490U ;
:slice_thickness = 0.76806 ;
} // group grating
group: adf {
// group attributes:
:enabled = 1US ;
:x = 0.5, 0.6 ;
:y = 0.5, 0.6 ;
:detector_min_angle = 0. ;
:detector_max_angle = 150. ;
:detector_num_angles = 151U ;
:detector_interval_exponent = 1.f ;
:average_configurations = 1US ;
:average_defoci = 1US ;
:save_slices_every = 10U ;
} // group adf
group: cbed {
// group attributes:
:enabled = 1US ;
:x = 0.5, 0.6 ;
:y = 0.5, 0.6 ;
:size = 0U, 0U ;
:average_configurations = 1US ;
:average_defoci = 0US ;
:save_slices_every = 101U ;
} // group cbed
group: frozen_phonon {
// group attributes:
:number_configurations = 10U ;
:fixed_slicing = 1US ;
:enabled = 1US ;
} // group frozen_phonon
group: plasmon_scattering {
// group attributes:
:enabled = 1US ;
:simple_mode = 0US ;
:plural_scattering = 0US ;
:max_energy = 25.f ;
:energy_grid_density = 2.f ;
:mean_free_path = 128.f ;
:plasmon_energy = 16.9f ;
:plasmon_fwhm = 4.f ;
} // group plasmon_scattering
} // group params
group: adf {
dimensions:
adf_position_x = 22 ;
adf_position_y = 22 ;
adf_detector_angle = 151 ;
adf_defocus = 1 ;
adf_phonon = 1 ;
adf_slice = 15 ;
coordinate_dim = 2 ;
adf_plasmon_energies = 51 ;
variables:
float adf_intensities(adf_defocus, adf_position_x, adf_position_y, adf_phonon, adf_slice, adf_plasmon_energies, adf_detector_angle) ;
float center_of_mass(adf_defocus, adf_position_x, adf_position_y, adf_phonon, adf_slice, adf_plasmon_energies, coordinate_dim) ;
double adf_probe_x_grid(adf_position_x) ;
double adf_probe_y_grid(adf_position_y) ;
double adf_detector_grid(adf_detector_angle) ;
double adf_slice_coords(adf_slice) ;
} // group adf
group: cbed {
dimensions:
cbed_position_x = 22 ;
cbed_position_y = 22 ;
cbed_k_x = 327 ;
cbed_k_y = 327 ;
cbed_defocus = 1 ;
cbed_phonon = 1 ;
cbed_slice = 2 ;
cbed_plasmon_energies = 51 ;
variables:
float cbed_intensities(cbed_defocus, cbed_position_x, cbed_position_y, cbed_phonon, cbed_slice, cbed_plasmon_energies, cbed_k_x, cbed_k_y) ;
double cbed_probe_x_grid(cbed_position_x) ;
double cbed_probe_y_grid(cbed_position_y) ;
double cbed_x_grid(cbed_k_x) ;
double cbed_y_grid(cbed_k_y) ;
double cbed_slice_coords(cbed_slice) ;
} // group cbed
}
The structure of NetCDF files is hierarchical and organized in groups. The following groups are written by
STEMsalabim:
AMBER
~~~~~
This group contains the atomic coordinates, species, displacements, radii, etc. for the complete crystal for each single
calculated frozen lattice configuration, as well as for each calculated defocus value. The AMBER group content is
compatible with the `AMBER specifications <http://ambermd.org/netcdf/nctraj.xhtml>`_. A *STEMsalabim* NetCDF file can
compatible with the `AMBER specifications <http://ambermd.org/netcdf/nctraj.xhtml>`_. A STEMsalabim NetCDF file can
be opened seamlessly with the `Ovito <http://www.ovito.org/>`_ crystal viewer.
.. csv-table::
:file: table_nc_amber.csv
runtime
~~~~~~~
.. csv-table::
:file: table_nc_runtime.csv
params
~~~~~~
All simulation parameters are collected in the ``params`` group as attributes.
.. note:: The ``params`` group contains subgroups with attributes that correspond exactly to the simulation
parameters as written, except
- ``/params/application/random_seed`` is set to the generated random seed
- ``/params/grating/nx`` and ``/params/grating/ny`` contain the simulation grid size used.
.. csv-table::
:file: table_nc_params.csv
adf
~~~
This group contains the simulated ADF intensities, the coordinates of the electron probe beam during scanning, the
detector angle grid that is used, and coordinates of the slices as used in the multi-slice algorithm.
.. csv-table::
:file: table_nc_adf.csv
cbed
~~~~
This group contains the simulated CBED intensities, the coordinates of the electron probe beam during scanning, k-space
grid, and coordinates of the slices as used in the multi-slice algorithm.
.. csv-table::
:file: table_nc_cbed.csv
Reading NC Files
----------------
......
......@@ -7,11 +7,11 @@ General information
Throughout this documentation we assume that you are familiar with the theoretical background behind the scanning
transmission electron microscope (STEM) to some degree. Also, we assume that you have some knowledge about the
UNIX/Linux command line and parallelized computation. *STEMsalabim* is currently not intended to be run on a desktop
UNIX/Linux command line and parallelized computation. STEMsalabim is currently not intended to be run on a desktop
computer. While that is possible and works, the main purpose of the program is to be used in a highly parallelized
multi-computer environment.
We took great care of making *STEMsalabim* easy to install. You can find instructions at :ref:`installing`. However, if
We took great care of making STEMsalabim easy to install. You can find instructions at :ref:`installing`. However, if
you run into technical problems you should seek help from an administrator of your computer cluster first.
.. _simulation-structure:
......@@ -19,7 +19,7 @@ you run into technical problems you should seek help from an administrator of yo
Structure of a simulation
-------------------------
The essence of *STEMsalabim* is to model the interaction of a focused electron beam with a bunch of atoms, typically
The essence of STEMsalabim is to model the interaction of a focused electron beam with a bunch of atoms, typically
in the form of a crystalline sample. Given the necessary input files, the simulation crunches numbers for some time,
after which all of the calculated results can be found in the output file. Please refer to :ref:`running` for notes
how to start a simulation.
......@@ -28,7 +28,7 @@ Input files
~~~~~~~~~~~
All information about the specimen are listed in the :ref:`crystal-file`, which is one of the two required input files
for *STEMsalabim*. It contains each atom's species (element), coordinates, and `mean square displacement
for STEMsalabim. It contains each atom's species (element), coordinates, and `mean square displacement
<https://en.wikipedia.org/wiki/Mean_squared_displacement>`_ as it appears in the `Debye-Waller factors
<https://en.wikipedia.org/wiki/Debye%E2%80%93Waller_factor>`_.
......@@ -39,7 +39,7 @@ microscope, detector, and all required simulation parameters. All these paramete
Output files
~~~~~~~~~~~~
The complete output of a *STEMsalabim* simulation is written to a `NetCDF
The complete output of a STEMsalabim simulation is written to a `NetCDF
<https://www.unidata.ucar.edu/software/netcdf/>`_ file. NetCDF is a binary, hierarchical file format for scientific
data, based on `HDF5 <https://support.hdfgroup.org/HDF5/>`_. NetCDF/HDF5 allow us to compress the output data and store
it in machine-readable, organized format while still only having to deal with a single output file.
......@@ -51,7 +51,7 @@ You can read more about the output file structure at :ref:`output-file`.
Hybrid Parallelization model
----------------------------
*STEMsalabim* simulations can be parallelized both via `POSIX threads <https://en.wikipedia.org/wiki/POSIX_Threads>`_
STEMsalabim simulations can be parallelized both via `POSIX threads <https://en.wikipedia.org/wiki/POSIX_Threads>`_
and via `message passing interface (MPI) <https://en.wikipedia.org/wiki/Message_Passing_Interface>`_. A typical
simulation will use both schemes at the same time: MPI is used for communication between the computing nodes, and
threads are used for intra-node parallelization, the usual multi-cpu/multi-core structure.
......@@ -64,22 +64,31 @@ threads are used for intra-node parallelization, the usual multi-cpu/multi-core
Let us assume a simulation that runs on :math:`M` computers and each of them spawns :math:`N` threads.
There is a single, special *master thread* (the thread 0 of the MPI process with rank 0) that orchestrates the simulation,
i.e., manages and distributes work packages. All other threads (:math:`(M\times N)-1`) participate in the simulation. In
MPI mode, each MPI process writes results to its own temporary file, and after each frozen lattice configuration the
results are merged. Merging is carried out sequentially by each individual MPI processor, so that no race condition
is ran into. The parameter :code:`output.tmp_dir` (see :ref:`parameter-file`) should be set to a directory that is local
Depending on the simulation parameters chosen, STEMsalabim may need to loop through multiple frozen phonon configurations
and values of the probe defocus. The same simulation (with differently displaced atoms and different probe defocus) is
therefore typically run multiple times. There are three parallelization schemes implemented in STEMsalabim:
- When :math:`M == 1`, i.e., no MPI parallelization is used, all pixels (probe positions) are distributed among the
:math:`N` threads and calculated in parallel.
- Each MPI processor calculates *all* pixels (probe positions) of its own frozen phonon / defocus configuration, i.e.,
:math:`M` configurations are calculated in parallel. Each of the :math:`M` calculations splits its pixels between
:math:`N` threads (each thread calculates one pixel at a time).
This scheme makes sense when the total number of configurations (`probe.num_defoci` :math:`\times`
`frozen_phonon.number_configurations`) is much larger than or divisible by :math:`M`.
- A single configuration is calculated at a time, and all the pixels are split between all :math:`M \times N` threads.
  In order to reduce the required MPI communication,
  only the main thread of each of the :math:`M` MPI processors communicates with the master thread. The master
thread sends a *work package* containing some number of probe pixels to be calculated to an MPI process, which then
carries out all the calculations in parallel on its :math:`N` threads. When a work package is finished, it requests another
work package from the master MPI process until there is no work left. In parallel, the worker threads of the MPI process
with rank 0 also work on emptying the work queue.
In MPI mode, each MPI process writes results to its own temporary file, and after each frozen lattice configuration the
results are merged. Merging is carried out sequentially by each individual MPI processor, to avoid race conditions.
The parameter :code:`output.tmp_dir` (see :ref:`parameter-file`) should be set to a directory that is local
to each MPI processor (e.g., :code:`/tmp`).
A typical *STEMsalabim* simulation is composed of many independent multi-slice simulations that differ only in the
position of the scanning probe. Hence, parallelization is done on the level of these multi-slice simulations, with each
thread performing them independently from other threads. In order to reduce the number of MPI messages being sent
around, only the main thread of each of the :math:`M` MPI processors communicates with the master thread. The master
thread sends a *work package* containing some number of probe pixels to be calculated to an MPI process, which then
carries out all the calculations in parallel on its :math:`N` threads. When a work package is finished, it requests another
work package from the master MPI process until there is no work left. In parallel, the worker threads of the MPI process
with rank 0 also work on emptying the work queue.
.. note:: Within one MPI processor, the threads can share their memory. As the main memory consumption comes from storing
the weak phase objects of the slices in the multi-slice simulation, which don't change during the actual simulation,
this greatly reduces memory usage as compared to MPI only parallelization. You should therefore always aim for
......
.. _reporting:
HTTP Status reporting
=====================
*STEMsalabim* simulations may take a long time, even when running them in parallel on many processors. In order to ease
tracking of the status of running simulations, we built reporting via HTTP POST requests into the program.
In order to use that feature, the `libCURL <https://curl.haxx.se/libcurl/>`_ library has to be installed and
*STEMsalabim* needs to be linked against it.
.. _configure-reporting:
Configure HTTP reporting
------------------------
To configure HTTP reporting, please add the ``http_reporting: {}`` block to your simulations's parameter file,
containing at least ``reporting = true;`` and the url to report to, ``url = "http://my_server_address:port/path";``.
If you want to use `HTTP basic authentication <https://en.wikipedia.org/wiki/Basic_access_authentication>`_,
you may also specify the options ``auth_user = "your_user";`` and ``auth_pass = "your_pass";``. Note, that HTTP basic
auth will be enabled as soon as ``auth_user`` is not empty. You should therefore only fill in that field when you want
to use authentication.
Additional, custom payload for the HTTP requests may be specified in the sub-block ``parameters: {}``. Each key-value
pair in this block is translated into JSON and appended to each request. This allows you to use custom authentication
techniques, such as token-based authentication
etc.
An example configuration block with HTTP basic authentication may look like this:
::
http_reporting: {
reporting = true;
url = "http://my_api_endpoint:8000/stemsalabim-reporting";
auth_user = "my_user";
auth_pass = "my_pass";
parameters: {
simulation_category = "suitable for many Nature papers";
}
}
The status requests
-------------------
In each request that *STEMsalabim* sends, some JSON payload is common. In addition to the JSON values specified in the
parameter file (see :ref:`configure-reporting`), the following parameters are always reported:
::
time: // The current date and time
id: // the UUID of the simulation
num_threads: // the number of threads of each MPI processor
num_processors: // the number of MPI processors
num_defoci: // the total number of defoci to calculate
num_configurations: // the total number of frozen phonon configurations to calculate
event: // A code for what event is reported. see below.
The following four different ``event`` codes, each for a different event, are reported:
``START_SIMULATION``: A simulation is started
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This request is sent at the beginning of a simulation. Additional key/value pairs sent are:
::
event: "START_SIMULATION"
version: // program version
git_commit: // git commit hash of the program version
title: // simulation title
``START_DEFOCUS``: A defocus iteration is started
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This request is sent at the beginning of a defocus iteration. Additional key/value pairs sent are:
::
event: "START_DEFOCUS"
defocus: // the defocus value in nm
defocus_index: // the index of the defocus, between 0 and num_defoci
``START_FP_CONFIGURATION``: A frozen phonon iteration is started
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This request is sent at the beginning of a frozen phonon configuration. Additional key/value pairs sent are:
::
event: "START_FP_CONFIGURATION"
defocus: // the defocus value in nm
defocus_index: // the index of the defocus, between 0 and num_defoci
configuration_index: // the index of the configuration, between 0 and num_configurations
``PROGRESS``: Progress report
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This request is sent during the calculation, typically after each integer percent of the simulation finished.
Additional key/value pairs sent are:
::
event: "START_CONFIGURATION"
defocus: // the defocus value in nm
defocus_index: // the index of the defocus, between 0 and num_defoci
configuration_index: // the index of the configuration, between 0 and num_configurations
progress: // progress between 0 and 1 of this configuration iteration within this defocus iteration
``FINISH``: Simulation finished
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This request is sent when the simulation finished. Additional key/value pairs sent are:
::
event: "FINISH"
How to process the reports
--------------------------
Obviously, in order to register the requests, an HTTP(S) server needs to be running on the target machine. For example,
a very simple server in python using the `flask <http://flask.pocoo.org/>`__ package, that only echoes the requests,
can be implemented as:
::
#!/usr/bin/env python
from flask import Flask
from flask import request
import json
app = Flask(__name__)
@app.route('/', methods=['POST'])
def echo():
content = request.get_json()
print(json.dumps(content, indent=4))
return ""
if __name__ == "__main__":
app.run()
Run the script and then start a *STEMsalabim* simulation to see requests incoming.
STEMsalabim
===========
The *STEMsalabim* software aims to provide accurate scanning transmission electron microscopy (STEM) image simulation of
The STEMsalabim software aims to provide accurate scanning transmission electron microscopy (STEM) image simulation of
a specimen whose atomic structure is known. It implements the frozen lattice multi-slice algorithm as described in
great detail in the book `Advanced computing in electron microscopy <http://dx.doi.org/10.1007/978-1-4419-6533-2>`_ by
Earl J. Kirkland.
......@@ -10,7 +10,7 @@ While there are multiple existing implementations of the same technique, at the
suitable for leveraging massive parallelization available on high-performance computing (HPC) clusters, making it
possible to simulate large supercells and parameter sweeps in reasonable time.
The purpose of *STEMsalabim* is to fill this gap by providing a multi-slice implementation that is well parallelizable
The purpose of STEMsalabim is to fill this gap by providing a multi-slice implementation that is well parallelizable
both within and across computing nodes, using a mixture of threaded parallelization and message passing interface (MPI).
......@@ -18,10 +18,13 @@ both within and across computing nodes, using a mixture of threaded parallelizat
:maxdepth: 2
:caption: Getting Started
what
install
usage
visualization
bla
.. toctree::
:maxdepth: 2
:caption: More information
......@@ -29,7 +32,6 @@ both within and across computing nodes, using a mixture of threaded parallelizat
general
parameters
file_formats
http_reporting
faq
whats_new
citing
......@@ -39,7 +41,7 @@ both within and across computing nodes, using a mixture of threaded parallelizat
Contact us!
===========
*STEMsalabim* is a relatively young software package and was not heavily tested outside the scope of our group.
STEMsalabim is a relatively young software package and was not heavily tested outside the scope of our group.
We are glad to help you getting your simulations to run.
Please contact **strl-stemsalabim [at] lists.uni-marburg.de** for support or feedback.
......@@ -47,13 +49,13 @@ Please contact **strl-stemsalabim [at] lists.uni-marburg.de** for support or fee
Credits
=======
* We acknowledge the creators of the supplementary libraries that *STEMsalabim* depends on.
* We acknowledge the creators of the supplementary libraries that STEMsalabim depends on.
* We would also like to acknowledge the creators of `STEMsim <http://dx.doi.org/10.1007/978-1-4020-8615-1_36>`_,
which we used as a reference implementation to test *STEMsalabim*.
which we used as a reference implementation to test STEMsalabim.
* Once again, we would like to highlight the book
`Advanced computing in electron microscopy <http://dx.doi.org/10.1007/978-1-4419-6533-2>`_ by Earl J. Kirkland for
its detailed description of the implementation of multi-slice algorithms.
* *STEMsalabim* was written in the `Structure & Technology Research Laboratory <https://www.uni-marburg.de/wzmw/strl>`_
* STEMsalabim was written in the `Structure & Technology Research Laboratory <https://www.uni-marburg.de/wzmw/strl>`_
of the `Philipps-Universität Marburg <https://www.uni-marburg.de/>`_ with financial support by
the `German Research Foundation <http://www.dfg.de/en/>`_
......
......@@ -3,37 +3,20 @@
Installing STEMsalabim
======================
Downloading the source code
---------------------------
We recommend you download the latest stable release (|release|) from the
`Releases page <https://gitlab.com/STRL/STEMsalabim/tags>`_. If you want the latest features and/or bugfixes,
you can also clone the repository using
::
$ git clone https://gitlab.com/STRL/STEMsalabim.git
$ git checkout devel # only if you want the devel code.
Requirements
------------
The following libraries and tools are needed to successfully compile the code:
* A C++11 compiler (such as `gcc/g++ <https://gcc.gnu.org/>`_ or `intel mkl <https://software.intel.com/en-us/mkl>`_).
* A C++11 compiler (such as `gcc/g++ <https://gcc.gnu.org/>`_ or `intel compiler suite <https://software.intel.com/en-us/compilers>`_).
* `CMake <https://cmake.org/>`_ > 3.3
* `NetCDF <https://www.unidata.ucar.edu/software/netcdf/>`_
* `libConfig <http://www.hyperrealm.com/libconfig/>`_ >= 1.5
* `FFTW3 <http://www.fftw.org/>`_
* `FFTW3 <http://www.fftw.org/>`_ or `Intel's MKL <https://software.intel.com/en-us/mkl>`_
* An MPI implementation (such as `OpenMPI <http://www.open-mpi.de/>`_)
The following libraries are *optional* and are needed only to enable additional features:
* `libCurl <https://curl.haxx.se/libcurl/>`_ (required for HTTP POST status announcements)
.. note:: You may find some of the requirements in the repositories of your Linux distribution, at least the compiler,
CMake, libCurl and OpenMPI. On Debian or Ubuntu Linux, for example, you can simply run the following command
CMake, and OpenMPI. On Debian or Ubuntu Linux, for example, you can simply run the following command
to download and install all the requirements:
::
......@@ -42,15 +25,26 @@ The following libraries are *optional* and are needed only to enable additional
libconfig++-dev \
libfftw3-dev \
libnetcdf-dev \
libcurl4-openssl-dev \
doxygen \
libopenmpi-dev \
openmpi-bin
.. Tip:: As the main work of the STEM image simulations is carried out by the `FFTW3 <http://www.fftw.org/>`_
library, you may reach best performance when you compile the library yourself with all available CPU level
optimizations enabled.
.. Tip:: Most of the computing time is spent calculating Fourier transforms, so it is beneficial for STEMsalabim
to use optimized FFT libraries. Sometimes, compiling FFTW or MKL on the target machine enables
optimizations that are not available in precompiled binaries, so this may be worth a try.
Downloading the source code
---------------------------
We recommend you download the latest stable release (|release|) from the
`Releases page <https://gitlab.com/STRL/STEMsalabim/tags>`_. If you want the latest features and/or bugfixes,
you can also clone the repository using
::
$ git clone https://gitlab.com/STRL/STEMsalabim.git
$ git checkout devel # only if you want the devel code.
Building STEMsalabim
......@@ -100,17 +94,17 @@ You are now ready to execute your first simulation.
Building with Intel MKL, Intel compiler (and Intel MPI)
-------------------------------------------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use the `Intel® Parallel Studio <https://software.intel.com/en-us/parallel-studio-xe>`_
for compilation, which includes the `Intel® Math Kernel Library (MKL) <https://software.intel.com/en-us/mkl>`_
that *STEMsalabim* can use for discrete fourier transforms instead of FFTW3. If the
that STEMsalabim can use for discrete fourier transforms instead of FFTW3. If the
`Intel® MPI Library <https://software.intel.com/en-us/intel-mpi-library>`_ is also available, it can be used
as the MPI implementation in *STEMsalabim*.
for MPI communication.
.. note:: We have tested compiling and running *STEMsalabim* only with Parallel Studio 2017 so far.
.. note:: We have tested compiling and running STEMsalabim only with Parallel Studio 2017 so far.
*STEMsalabim*'s CMake files try to find the necessary libraries themselves, when the following conditions are true:
STEMsalabim's CMake files try to find the necessary libraries themselves, when the following conditions are true:
1. Either the environment variable :code:`MKLROOT` is set to a valid install location of the MKL, or
the CMake variable :code:`MKL_ROOT` (pointing at the same location) is specified.
......@@ -123,10 +117,14 @@ For example, let's say the Intel suite is installed in :code:`/opt/intel` and we
$ export PATH=$PATH:/opt/intel/... # mpicxx and icpc should be in the path!
$ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/gcc-6.3/lib64 \
cmake ../source -DMKL_ROOT=/opt/intel -DCMAKE_CXX_COMPILER=icpc -DGCCDIR=/opt/gcc-6.3
cmake ../source \
-DMKL_ROOT=/opt/intel \
-DCMAKE_CXX_COMPILER=icpc \
-DGCCDIR=/opt/gcc-6.3 \
-D... more CMAKE arguments as described above.
Depending on how your environment variables are set, you may be able to skip the :code:`LD_LIBRARY_PATH=..` part.
When *STEMsalabim* is executed, you may again need to specify the library path of the :code:`libstdc++`, using ::
When STEMsalabim is executed, you may again need to specify the library path of the :code:`libstdc++`, using ::
$ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/gcc-6.3/lib64 mpirun -np ... /path/to/stemsalabim -p ...
......
......@@ -3,7 +3,7 @@
Simulation Parameters
=====================
A *STEMsalabim* simulation is mainly configured via `parameter
A STEMsalabim simulation is mainly configured via `parameter
files <#param_files>`__, with a few exceptions where configuration
options may be overriden by `command line arguments <#cli_arguments>`__.
......@@ -12,7 +12,7 @@ options may be overriden by `command line arguments <#cli_arguments>`__.
Parameter files
---------------
The configuration file that *STEMsalabim* expects for the command line
The configuration file that STEMsalabim expects for the command line
parameter ``--params`` is formatted using the simple JSON-like syntax of
`libConfig <http://www.hyperrealm.com/libconfig/>`__ syntax. Below all
available parameters are tabulated. Each section (``block`` in the
......@@ -27,7 +27,6 @@ simulation.
application: {
random_seed = 0; # the random seed. 0 -> generate
skip_simulation = false; # skip the actual multi-slice simulation (for debugging)
}
......@@ -38,12 +37,6 @@ simulation.
seed for that can be specified here. If set to 0, a random seed is generated by the program. For reproduction of
previous results, the seed can be set to a specific value.
**application.skip_simulation**
*boolean [default: `false`]*
Do not carry out the actual multi-slice simulation, but only write the crystal information to the output file. This
is for debugging purposes.
``simulation``
~~~~~~~~~~~~~~
......@@ -139,13 +132,13 @@ Parameters of the STEM probe.
**probe.fwhm_defoci**
*number (nm) [default: `6.0`]*
*STEMsalabim* can calculate a defocus series to model chromatic aberrations. In that case, this parameter is the
STEMsalabim can calculate a defocus series to model chromatic aberrations. In that case, this parameter is the
full-width-half-maximum of the normal distribution of defocus spread in ``nm``.
**probe.num_defoci**
*number [default: `1`]*
*STEMsalabim* can calculate a defocus series to model chromatic aberrations. In that case, this parameter is the
STEMsalabim can calculate a defocus series to model chromatic aberrations. In that case, this parameter is the
number of defoci calculated.
**probe.astigmatism_ca**
......@@ -217,8 +210,9 @@ Settings that describe the multi-slice algorithm, i.e., the density of the discr
*number (1/nm) [default: `360.0`]*
The density for the real space and fourier space grids. This number multiplied by the supercell size in :math:`x`
and :math:`y` direction gives the number of sampling grid points for the calculation. This also determines the
maximum angle :math:`\alpha = k\lambda` that is described by the :math:`k`-space grids.
and :math:`y` direction gives the minimal number of sampling grid points for the calculation. The actual grid
size used for the simulation may be bigger than that, as an efficient size for the fourier transforms is chosen.
This also determines the maximum angle :math:`\alpha = k\lambda` that is described by the :math:`k`-space grids.
**grating.slice_thickness**
*number (nm) [default: `0.2715`]*
......@@ -264,7 +258,7 @@ Settings for collection of ADF data.
whole ``y`` width of the supercell is scanned.
**adf.detector_min_angle**
*number (mrad) [default: `1.0`]*
*number (mrad) [default: `0.0`]*
Inner ADF detector angle in mrad.
......@@ -274,7 +268,7 @@ Settings for collection of ADF data.
Outer ADF detector angle in mrad.
**adf.detector_num_angles**
*number [default: `300`]*
*number [default: `301`]*
Number of ADF detector angle bins.
......@@ -317,14 +311,14 @@ Settings for collection of CBEDs.
::
cbed: {
enabled = true; # enable calculation and collection of CBED intensities
x = (0.0, 1.0); # [min, max] where min and max are in relative units
y = (0.0, 1.0); # [min, max] where min and max are in relative units
size = [128, 128]; # When provided, this parameter determines the size of CBEDs saved
# to the output file. The CBEDs are resized using bilinear interpolation.
save_slices_every = 0; # save only every n slices. 0 -> only the sample bottom is saved.
enabled = true; # enable calculation and collection of CBED intensities
x = (0.0, 1.0); # [min, max] where min and max are in relative units
y = (0.0, 1.0); # [min, max] where min and max are in relative units
size = [128, 128]; # When provided, this parameter determines the size of CBEDs saved
# to the output file. The CBEDs are resized using bilinear interpolation.
save_slices_every = 0; # save only every n slices. 0 -> only the sample bottom is saved.
average_configurations = true; # average the frozen phonon configurations in the output file
average_defoci = true; # average the defoci in the output file
average_defoci = true; # average the defoci in the output file
}
......@@ -379,11 +373,17 @@ Settings for the ``frozen_phonon`` algorithm to simulate TDS.
::
frozen_phonon: {
enabled = true; # enable or disable the frozen phonon feature
number_configurations = 15; # Number of frozen phonon configurations to calculate
fixed_slicing = true; # When this is true, the z coordinate is not varied during phonon vibrations.
}
**frozen_phonon.enabled**
*boolean [default: `true`]*
Whether diffuse thermal scattering via frozen phonon approximation should be enabled.
**frozen_phonon.number_configurations**
*integer [default: `1`]*
......@@ -395,57 +395,69 @@ Settings for the ``frozen_phonon`` algorithm to simulate TDS.
When true, the ``z`` coordinates (beam direction) of the atoms is not varied, resulting in fixed slicing between
subsequent frozen phonon configurations.
``http_reporting``
~~~~~~~~~~~~~~~~~~
Settings for HTTP reporting of simulation progress.
``plasmon_scattering``
~~~~~~~~~~~~~~~~~~~~~~
Settings for single plasmon scattering.
::
http_reporting: { # send POST status requests of the simulation to some HTTP endpoint.
reporting = true; # reporting can be disabled with this parameter.
url = "http://my_url"; # The URL to POST to.
auth_user = "my_user"; # username for HTTP basic auth
auth_pass = "my_pass"; # password for HTTP basic auth
parameters: { # All parameters in this http_reporting.parameters are sent as query.
# in addition to the status information. Use whatever your API needs.
my_login = "username";
my_token = "abcdef";
}
plasmon_scattering: {
enabled = true; # enable or disable the feature
simple_mode = true; # No energy resolution, only E = 0 and 0 < E < max_energy
max_energy = 10; # max energy of the energy grid considered for plasmon energy transfer in eV
energy_grid_density = 10; # density of the energy grid in 1/eV
mean_free_path = 120; # mean free path of a plasmon in nm
plasmon_energy = 16.7; # plasmon energy in eV
plasmon_fwhm = 3.7; # plasmon energy FWHM in eV
}
**http_reporting.reporting**
**plasmon_scattering.enabled**
*boolean [default: `false`]*
Should the simulation status be announced via HTTP POST requests? See :ref:`reporting` for details.
Whether single plasmon scattering should be enabled.
**http_reporting.url**
*string [default: ``]*
**plasmon_scattering.simple_mode**
*boolean [default: `true`]*
The HTTP API endpoint for the status reporting. See :ref:`reporting` for details.
When true, the plasmon energy loss is not resolved: only the elastic signal (E = 0) and a single inelastic channel (0 < E < max_energy) are distinguished.
**http_reporting.auth_user**
*string [default: ``]*
**plasmon_scattering.max_energy**
*float (eV) [default: `10`]*
The username for `HTTP Basic Authentication <https://en.wikipedia.org/wiki/Basic_access_authentication>`_. Leave
empty to not authenticate.
Maximum energy of the plasmon energy grid in eV.
**http_reporting.auth_pass**
*string [default: ``]*
**plasmon_scattering.energy_grid_density**
*float (1/eV) [default: `10`]*
Density of the plasmon energy grid in 1/eV.
**plasmon_scattering.mean_free_path**
*float (nm) [default: `120`]*
The password for `HTTP Basic Authentication <https://en.wikipedia.org/wiki/Basic_access_authentication>`_.
Mean free path of the plasmons in the material.
**http_reporting.params**
*libConfig block [default: `{}`]*
**plasmon_scattering.plasmon_energy**
*float (eV) [default: `16.7`]*
Characteristic energy of the material's plasmons.
**plasmon_scattering.plasmon_fwhm**
*float (eV) [default: `3.7`]*
Full width half maximum of the plasmon peak of the spectrum.
Additional parameters to include in the HTTP POST JSON requests. See :ref:`reporting` for details.
.. _cli-parameters:
Command line arguments
----------------------
stemsalabim
~~~~~~~~~~~
**--help, -h**
*flag*
......@@ -459,7 +471,7 @@ Command line arguments
**--num-threads**
*integer [default: `1`]*
Number of threads per MPI core. Note, that *STEMsalabim* will do nothing if only parallelized via threads and
Number of threads per MPI core. Note, that STEMsalabim will do nothing if only parallelized via threads and
``--num-threads=1``, as thread ``0`` of the master MPI process does not participate in the calculation. See
:ref:`parallelization-scheme` for details.
......@@ -469,26 +481,40 @@ Command line arguments
The number of tasks that are sent to an MPI process. This should scale with the number of threads each MPI
process spawns. A good value is :math:`10 \times` the value of **--num_threads**.
**--skip-simulation**
*flag*
**--tmp-dir**
*string*
Override configuration parameter **application.skip_simulation** to true when this flag is set.
Override the value of the **output.tmp_dir** setting.
**--num-configurations**
*integer*
**--output-file, -o**
*string*
Override configuration parameter **frozen_phonon.number_configurations**.
Override the value of the **output.output_file** setting.
**--defocus, -d**
*float*
**--crystal-file, -c**
*string*
Override the value of the **probe.defocus** setting. If specified exactly once, a single defocus is calculated.
If specified *exactly three times*, the functionality is similar to the **probe.defocus** parameter.
Override the value of the **specimen.crystal_file** setting.
**--tmp-dir**
*string*
ssb-mkin
~~~~~~~~
Override the value of the **output.tmp_dir** setting.
**--help, -h**
*flag*
Display a help message with a brief description of available command line parameters.
**--params, -p**
*string (required)*
Path to the configuration file as explained above in :ref:`parameter-file` files.
**--num-threads**
*integer [default: `1`]*
Number of threads per MPI core. Note, that STEMsalabim will do nothing if only parallelized via threads and
``--num-threads=1``, as thread ``0`` of the master MPI process does not participate in the calculation. See
:ref:`parallelization-scheme` for details.
**--output-file, -o**
*string*
......@@ -500,7 +526,51 @@ Command line arguments
Override the value of the **specimen.crystal_file** setting.
**--title, -t**
**--stored-potentials**
*flag*
When set, ``ssb-mkin`` calculates the slice coulomb potentials and stores them in the output file.
When ``ssb-run`` is also called with ``--stored-potentials``, the potentials are read from the file instead
of being recalculated.