diff --git a/docs/source/usersguide/volume.rst b/docs/source/usersguide/volume.rst
index c040eb2d7fd..1479a27d2a8 100644
--- a/docs/source/usersguide/volume.rst
+++ b/docs/source/usersguide/volume.rst
@@ -7,11 +7,15 @@ Stochastic Volume Calculations
 .. currentmodule:: openmc

 OpenMC has a capability to stochastically determine volumes of cells, materials,
-and universes. The method works by overlaying a bounding box, sampling points
-from within the box, and seeing what fraction of points were found in a desired
-domain. The benefit of doing this stochastically (as opposed to equally-spaced
-points), is that it is possible to give reliable error estimates on each
-stochastic quantity.
+and universes by two methods. The first method works by overlaying a bounding
+box, sampling points from within the box, and seeing what fraction of points
+were found in a desired domain. The benefit of doing this stochastically (as
+opposed to using equally spaced points) is that it is possible to give reliable
+error estimates on each stochastic quantity. The second method traces rays in
+random directions from points sampled as in the first method and scores the
+fraction of each ray's length within the box that lies in a desired domain.
+This method is intended primarily for calculating small volume fractions and
+for geometry testing. By default, volume calculations use the first method.

 To specify that a volume calculation be run, you first need to create an
 instance of :class:`openmc.VolumeCalculation`. The constructor takes a list of
@@ -22,10 +26,16 @@ the specified domains::

     lower_left = (-0.62, -0.62, -50.)
     upper_right = (0.62, 0.62, 50.)
     vol_calc = openmc.VolumeCalculation([fuel, clad, moderator], 1000000,
-                                        lower_left, upper_right)
+                                        lower_left, upper_right)
+
+To run the same volume calculation with ray tracing instead, pass an additional
+argument::
+
+    vol_calc = openmc.VolumeCalculation([fuel, clad, moderator], 1000000,
+                                        lower_left, upper_right, 'ray')

 For domains contained within regions that have simple definitions, OpenMC can
-sometimes automatically determine a bounding box. In this case, the last two
+sometimes automatically determine a bounding box. In this case, the bounding box
 arguments are not necessary.
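+A stopping criterion can be attached to either estimator through
+:meth:`openmc.VolumeCalculation.set_trigger`; its optional ``max_iterations``
+argument (added in this change) bounds the number of iterations. The numeric
+values below are purely illustrative::
+
+    # Iterate until the maximum standard deviation of the volumes falls
+    # below 0.01 cm^3, but stop after at most 20 iterations
+    vol_calc.set_trigger(1e-2, 'std_dev', max_iterations=20)
+
+The automatic bounding-box determination mentioned above can simplify the
+input.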
 For example, ::
diff --git a/include/openmc/position.h b/include/openmc/position.h
index 5d291d26b95..20adb8856d3 100644
--- a/include/openmc/position.h
+++ b/include/openmc/position.h
@@ -83,6 +83,7 @@ struct Position {
     return x * other.x + y * other.y + z * other.z;
   }
   inline double norm() const { return std::sqrt(x * x + y * y + z * z); }
+  inline double norm_sq() const { return x * x + y * y + z * z; }
   inline Position cross(Position other) const
   {
     return {y * other.z - z * other.y, z * other.x - x * other.z,
diff --git a/include/openmc/volume_calc.h b/include/openmc/volume_calc.h
index fa8d3d65ece..2dc274c3e5f 100644
--- a/include/openmc/volume_calc.h
+++ b/include/openmc/volume_calc.h
@@ -7,13 +7,20 @@
 #include

 #include "openmc/array.h"
+#include "openmc/cell.h"
 #include "openmc/openmp_interface.h"
+#include "openmc/plot.h"
 #include "openmc/position.h"
 #include "openmc/tallies/trigger.h"
+#include "openmc/timer.h"
 #include "openmc/vector.h"

 #include "pugixml.hpp"
 #include "xtensor/xtensor.hpp"
+
+#ifdef OPENMC_MPI
+#include <mpi.h>
+#endif
 #ifdef _OPENMP
 #include <omp.h>
 #endif
@@ -28,57 +35,242 @@ class VolumeCalculation {

 public:
   // Aliases, types
-  struct Result {
-    array<double, 2> volume;    //!< Mean/standard deviation of volume
+  struct NuclResult {
     vector<int> nuclides;       //!< Index of nuclides
     vector<double> atoms;       //!< Number of atoms for each nuclide
     vector<double> uncertainty; //!< Uncertainty on number of atoms
-    int iterations; //!< Number of iterations needed to obtain the results
-  };                // Results for a single domain
+  }; // Results of nuclides calculation for a single domain
+
+  //! \brief Tally corresponding to a single material (AoS)
+  struct VolTally {
+    double score;               //!< Current batch score accumulator
+    array<double, 2> score_acc; //!< Scores and squared scores accumulator
+    int32_t index;              //!< Material ID
+
+    inline VolTally(const int i_material = 0, const double contrib = 0.0,
+      const double score_acc_ = 0.0, const double score2_acc_ = 0.0);
+
+    // private:
+    //! \brief Add the batch score mean to the accumulators
+    //! \param[in] batch_size_1 Inverse of the batch size
+    inline void finalize_batch(const double batch_size_1);
+
+    //! \brief Copy counter data from a given tally into this one
+    //! \param[in] vol_tally Data source
+    inline void assign_tally(const VolTally& vol_tally);
+
+    //! \brief Add counter data from a given tally to this one
+    //! \param[in] vol_tally Data source
+    inline void append_tally(const VolTally& vol_tally);
+
+    //! \brief Determines whether the given trigger condition is satisfied for
+    //!   this tally
+    //
+    //! \param[in] trigger_type Type of trigger condition
+    //! \param[in] threshold Value for the trigger condition (either volume
+    //!   fraction variance or squared rel. err., depending on the trigger type)
+    //! \param[in] n_samples Number of samples collected
+    //! \return True if the trigger condition is satisfied
+    inline bool trigger_state(const TriggerMetric trigger_type,
+      const double threshold, const size_t& n_samples) const;
+  };
+
+  //! \brief Online calculation results specific to each thread
+  //! \note This structure is coupled with the corresponding MPI datatype
+  struct CalcResults {
+    uint64_t n_samples; //!< Number of samples
+    uint64_t n_rays;    //!< Number of traced rays
+    uint64_t n_segs;    //!< Number of traced ray segments
+    uint64_t n_errors;  //!< Number of tracing errors
+    int iterations;     //!< Number of iterations needed to obtain the results
+    double cost; //!< Product of spent time and number of threads/processes
+    Timer sampling_time; //!< Timer for measurement of the simulation
+    vector<vector<VolTally>> vol_tallies; //!< Volume tallies for each domain
+    vector<NuclResult> nuc_results;       //!< Nuclides of each domain
+
+    CalcResults(const VolumeCalculation& vol_calc);
+
+    //! \brief Reset all counters
+    void reset();
+
+    //! \brief Append another set of counters to this one
+    void append(const CalcResults& other);
+
+#ifdef OPENMC_MPI
+    //! \brief Collects results from all MPI processes onto this one
+    void collect_MPI();
+#endif
+  };

   // Constructors
   VolumeCalculation(pugi::xml_node node);
-  VolumeCalculation() = default;

   // Methods

   //! \brief Stochastically determine the volume of a set of domains along with
-  //! the
-  //! average number densities of nuclides within the domain
+  //!   the average number densities of nuclides within the domain
   //
+  //! \param[in,out] results Results entity to be filled by the calculation
-  //! \return Vector of results for each user-specified domain
-  vector<Result> execute() const;
+  void execute(CalcResults& results) const;
+
+  //! \brief Check whether a material has already been hit for a given domain.
+  //!   If not, add a new entry to the tally vector
+  //
+  //! \param[in] i_material Index in the global materials vector
+  //! \param[in] contrib Scoring value
+  //! \param[in,out] vol_tallies Vector of tallies corresponding to each
+  //!   material
+  void check_hit(const int32_t i_material, const double contrib,
+    vector<VolTally>& vol_tallies) const;
+
+  //! \brief Reduce the volumetric tally vectors from each thread to a single
+  //!   copy
+  //
+  //! \param[in] local_results Results specific to each thread
+  //! \param[out] results Reduced results
+  void reduce_results(
+    const CalcResults& local_results, CalcResults& results) const;
+
+  //! \brief Print volume calculation results
+  //
+  //! \param[in] results Full volume calculation results
+  void show_results(const CalcResults& results) const;
+
+  //! \brief Prints a statistics parameter
+  //
+  //! \param[in] label Name of the parameter
+  //! \param[in] units Units of measure
+  //! \param[in] value Value of the parameter
+  void show_vol_stat(
+    const std::string label, const std::string units, const double value) const;
+
+  //! \brief Prints the volume result for a domain
+  //
+  //! \param[in] domain_type Either material, cell, etc.
+  //! \param[in] domain_id Number of this domain
+  //! \param[in] region_name Domain description
+  //! \param[in] mean Center of the confidence interval
+  //! \param[in] stddev Half-width of the confidence interval
+  void show_volume(const std::string domain_type, const int domain_id,
+    const std::string region_name, const double mean,
+    const double stddev) const;

   //! \brief Write volume calculation results to HDF5 file
   //
   //! \param[in] filename Path to HDF5 file to write
-  //! \param[in] results Vector of results for each domain
-  void to_hdf5(
-    const std::string& filename, const vector<Result>& results) const;
+  //! \param[in] results Results entity
+  void to_hdf5(const std::string& filename, const CalcResults& results) const;

   // Tally filter and map types
   enum class TallyDomain { UNIVERSE, MATERIAL, CELL };
+  // Volume estimator type
+  enum class EstMode { REJECTION, RAYTRACE };

   // Data members
-  TallyDomain domain_type_; //!< Type of domain (cell, material, etc.)
-  size_t n_samples_;        //!< Number of samples to use
-  double threshold_ {-1.0}; //!< Error threshold for domain volumes
+  TallyDomain domain_type_;      //!< Type of domain (cell, material, etc.)
+  size_t n_samples_;             //!< Number of samples to use
+  double volume_sample_;         //!< Volume of the bounding primitive
+  double threshold_ {-1.0};      //!< Error threshold for domain volumes
+  double threshold_cnd_;         //!< Pre-computed value for trigger condition
+  int max_iterations_ {INT_MAX}; //!< Limit on the number of iterations (the
+                                 //!< data type's maximum value by default)
   TriggerMetric trigger_type_ {
     TriggerMetric::not_active}; //!< Trigger metric for the volume calculation
   Position lower_left_;  //!< Lower-left position of bounding box
   Position upper_right_; //!< Upper-right position of bounding box
   vector<int> domain_ids_; //!< IDs of domains to find volumes of
+  EstMode mode_;           //!< Volume estimator type
+
+#ifdef OPENMC_MPI
+  //! \brief Creates MPI structs for passing data between processes
+  void initialize_MPI_struct() const;
+
+  //! \brief Deletes MPI structs for passing data between processes
+  void delete_MPI_struct() const;
+#endif

 private:
-  //! \brief Check whether a material has already been hit for a given domain.
-  //! If not, add new entries to the vectors
+  constexpr static int _INDEX_TOTAL =
+    -999; //!< Index of the zero-element tally holding entire-domain totals;
+          //!< it must lie outside the material ID range
+
+  //! \brief Computes the lengths of the two segments of a bounding box chord
+  //
+  //! \param[in] r Location inside the bounding box that splits the chord
+  //! \param[in] u Direction of the chord to compute (normalized)
+  //! \return Pair of backward (first) and forward (second) chord segments
+  std::pair<double, double> get_box_chord(
+    const Position& r, const Direction& u) const;
+
+  //! \brief Computes the estimated mean and std. dev. for a tally
+  //
+  //! \param[in] n_samples Sample size
+  //! \param[in] coeff_norm Normalization coefficient to multiply by
+  //! \param[in] vol_tally Tally
+  //! \return Array of mean and stddev
+  array<double, 2> get_tally_results(const size_t& n_samples,
+    const double coeff_norm, const VolTally& vol_tally) const;
+};
+
+//==============================================================================
+// Volume estimator ray class
+//==============================================================================
+
+class VolEstRay : public Ray {
+  friend class VolumeCalculation; // for access to the CalcResults struct and
+                                  // the EstMode variable; arguably not ideal
+                                  // C++ practice
+
+public:
+  //! \brief Constructs a ray of given length emitted from a given point for
+  //!   the volume track-length estimator, scoring into the given tallies
   //
-  //! \param[in] i_material Index in global materials vector
-  //! \param[in,out] indices Vector of material indices
-  //! \param[in,out] hits Number of hits corresponding to each material
-  void check_hit(
-    int i_material, vector<int>& indices, vector<uint64_t>& hits) const;
+  //! \param[in] r Coordinates of the ray origin
+  //! \param[in] u Direction of the ray
+  //! \param[in] dist Target length of the ray
+  //! \param[in] coeff_mult Coefficient multiplying the scores; it should
+  //!   contain the reciprocal chord length and may contain a stat. weight, etc.
+  //! \param[in] vol_calc Volume calculation parameters
+  //! \param[in,out] results Tallies for scoring during tracing, etc.
+  VolEstRay(Position& r, Direction& u, const double dist,
+    const double coeff_mult, const VolumeCalculation& vol_calc,
+    VolumeCalculation::CalcResults& results)
+    : Ray(r, u), traversal_distance_max_(dist), coeff_mult_(coeff_mult),
+      vol_calc_(vol_calc), results_(results)
+  {}
+
+  //! \brief Track length estimator on intersection events
+  virtual void on_intersection() override;
+
+private:
+  const double traversal_distance_max_; //!< Target ray length
+  double traversal_distance_last_ =
+    0.;                       //!< One segment beyond traversal_distance
+  const double coeff_mult_;   //!< Coefficient multiplying the scores
+  const VolumeCalculation& vol_calc_; //!< Parameters of volume calculation
+  VolumeCalculation::CalcResults&
+    results_; //!< Tallies for scoring and other statistics collector
+
+  //! \brief Rejection estimator
+  inline void score_hit();
+
+  //! \brief Contributes to volume tallies
+  //
+  //! \param[in] mode Type of estimator used
+  //! \param[in] score Contribution value
+  //! \param[in] id_mat Material ID, required due to the material() and
+  //!   material_last() disagreement (see the on_intersection() body for
+  //!   details)
+  inline void vol_scoring(const VolumeCalculation::EstMode mode,
+    const double score, const int id_mat);
+
+  //! \brief Returns a pointer to either the cell at the current point location
+  //!   or the cell of the last ray segment
+  inline std::unique_ptr<Cell>& current_cell(
+    VolumeCalculation::EstMode mode, int level);
 };

//==============================================================================
@@ -93,35 +285,6 @@ extern vector<VolumeCalculation> volume_calcs;
// Non-member functions
//==============================================================================

-//! Reduce vector of indices and hits from each thread to a single copy
-//
-//! \param[in] local_indices Indices specific to each thread
-//! \param[in] local_hits Hit count specific to each thread
-//! \param[out] indices Reduced vector of indices
-//! \param[out] hits Reduced vector of hits
-template<typename T>
-void reduce_indices_hits(const vector<T>& local_indices,
-  const vector<uint64_t>& local_hits, vector<T>& indices,
-  vector<uint64_t>& hits)
-{
-  const int n_threads = num_threads();
-
-#pragma omp for ordered schedule(static, 1)
-  for (int i = 0; i < n_threads; ++i) {
-#pragma omp ordered
-    for (int j = 0; j < local_indices.size(); ++j) {
-      // Check if this material has been added to the master list and if
-      // so, accumulate the number of hits
-      auto it = std::find(indices.begin(), indices.end(), local_indices[j]);
-      if (it == indices.end()) {
-        indices.push_back(local_indices[j]);
-        hits.push_back(local_hits[j]);
-      } else {
-        hits[it - indices.begin()] += local_hits[j];
-      }
-    }
-  }
-}
-
 void free_memory_volume();

 } // namespace openmc
diff --git a/openmc/volume.py b/openmc/volume.py
index c44adf98a52..4f8c2384c04 100644
--- a/openmc/volume.py
+++ b/openmc/volume.py
@@ -32,6 +32,10 @@ class VolumeCalculation:
         Upper-right coordinates of bounding box used to sample points. If this
         argument is not supplied, an attempt is made to automatically determine
         a bounding box.
+    estimator_type : {'hit', 'ray'}
+        Type of volume estimator (default: 'hit').
+
+        .. versionadded:: 0.15

     Attributes
     ----------
@@ -66,13 +70,20 @@ class VolumeCalculation:
         Number of iterations over samples (for calculations with a trigger).

        .. versionadded:: 0.12
+    max_iterations : int
+        Maximum allowed number of iterations (optional, for calculations
+        with a trigger).
+
+        .. versionadded:: 0.15
     """
-    def __init__(self, domains, samples, lower_left=None, upper_right=None):
+    def __init__(self, domains, samples, lower_left=None, upper_right=None,
+                 estimator_type='hit'):
         self._atoms = {}
         self._volumes = {}
         self._threshold = None
         self._trigger_type = None
+        self._max_iterations = None
         self._iterations = None

         cv.check_type('domains', domains, Iterable,
@@ -125,6 +136,8 @@ def __init__(self, domains, samples, lower_left=None, upper_right=None):
             raise ValueError('Lower-left and upper-right bounding box '
                              'coordinates must be finite.')

+        self.estimator_type = estimator_type
+
     @property
     def ids(self):
         return self._ids
@@ -187,6 +200,18 @@ def trigger_type(self, trigger_type):
                       ('variance', 'std_dev', 'rel_err'))
        self._trigger_type = trigger_type

+    @property
+    def max_iterations(self):
+        return self._max_iterations
+
+    @max_iterations.setter
+    def max_iterations(self, max_iterations):
+        name = 'volume calculation iterations limit'
+        cv.check_type(name, max_iterations, Integral, none_ok=True)
+        if max_iterations is not None:
+            cv.check_greater_than(name, max_iterations, 0)
+        self._max_iterations = max_iterations
+
     @property
     def iterations(self):
         return self._iterations
@@ -198,6 +223,16 @@ def iterations(self, iterations):
             cv.check_greater_than(name, iterations, 0)
         self._iterations = iterations

+    @property
+    def estimator_type(self):
+        return self._estimator_type
+
+    @estimator_type.setter
+    def estimator_type(self, estimator_type):
+        cv.check_value('volume estimator mode', estimator_type,
+                       ('hit', 'ray'))
+        self._estimator_type = estimator_type
+
     @property
     def domain_type(self):
         return self._domain_type
@@ -230,7 +265,7 @@ def atoms_dataframe(self):

         return pd.DataFrame.from_records(items, columns=columns)

-    def set_trigger(self, threshold, trigger_type):
+    def set_trigger(self, threshold, trigger_type, max_iterations=None):
         """Set a trigger on the volume calculation

         .. versionadded:: 0.12

         Parameters
         ----------
         threshold : float
             Threshold for the maximum standard deviation of volumes
         trigger_type : {'variance', 'std_dev', 'rel_err'}
             Value type used to halt volume calculation
+        max_iterations : int
+            Maximum allowed number of iterations (optional)
         """
         self.trigger_type = trigger_type
         self.threshold = threshold
+        self.max_iterations = max_iterations
+
+    def set_estimator(self, estimator_type):
+        """Define the type of volume estimator
+
+        .. versionadded:: 0.15
+
+        Parameters
+        ----------
+        estimator_type : {'hit', 'ray'}
+            Either rejection or ray tracing volume estimator
+        """
+        self.estimator_type = estimator_type

     @classmethod
     def from_hdf5(cls, filename):
@@ -270,8 +320,11 @@ def from_hdf5(cls, filename):

             threshold = f.attrs.get('threshold')
             trigger_type = f.attrs.get('trigger_type')
+            max_iterations = f.attrs.get('max_iterations')
             iterations = f.attrs.get('iterations', 1)

+            estimator_type = f.attrs.get('estimator_type', b'hit')
+
             volumes = {}
             atoms = {}
             ids = []
@@ -301,10 +354,11 @@ def from_hdf5(cls, filename):
             domains = [openmc.Universe(uid) for uid in ids]

         # Instantiate the class and assign results
-        vol = cls(domains, samples, lower_left, upper_right)
+        vol = cls(domains, samples, lower_left, upper_right,
+                  estimator_type.decode())

         if trigger_type is not None:
-            vol.set_trigger(threshold, trigger_type.decode())
+            vol.set_trigger(threshold, trigger_type.decode(), max_iterations)

         vol.iterations = iterations
         vol.volumes = volumes
@@ -355,6 +409,10 @@ def to_xml_element(self):
             trigger_elem = ET.SubElement(element, "threshold")
             trigger_elem.set("type", self.trigger_type)
             trigger_elem.set("threshold", str(self.threshold))
+            if self.max_iterations is not None:
+                trigger_elem.set("max_iterations", str(self.max_iterations))
+        et_elem = ET.SubElement(element, "estimator_type")
+        et_elem.text = str(self.estimator_type)
         return element

     @classmethod
@@ -379,6 +437,7 @@ def from_xml_element(cls, elem):
         samples = int(get_text(elem, "samples"))
         lower_left = tuple(get_elem_list(elem, "lower_left", float))
         upper_right = tuple(get_elem_list(elem, "upper_right", float))
+        estimator_type = get_text(elem, "estimator_type", 'hit')

         # Instantiate some throw-away domains that are used by the constructor
         # to assign IDs
@@ -391,13 +450,14 @@ def from_xml_element(cls, elem):
         elif domain_type == 'universe':
             domains = [openmc.Universe(uid) for uid in ids]

-        vol = cls(domains, samples, lower_left, upper_right)
+        vol = cls(domains, samples, lower_left, upper_right, estimator_type)

         # Check for trigger
         trigger_elem = elem.find("threshold")
         if trigger_elem is not None:
             trigger_type = get_text(trigger_elem, "type")
             threshold = float(get_text(trigger_elem, "threshold"))
-            vol.set_trigger(threshold, trigger_type)
+            max_iterations = get_text(trigger_elem, "max_iterations")
+            if max_iterations is not None:
+                max_iterations = int(max_iterations)
+            vol.set_trigger(threshold, trigger_type, max_iterations)

         return vol
diff --git a/src/plot.cpp b/src/plot.cpp
index 2cadc48cefe..c088f5a8793 100644
--- a/src/plot.cpp
+++ b/src/plot.cpp
@@ -1695,6 +1695,10 @@ void Ray::trace()
       coord(lev).r() += boundary().distance() * coord(lev).u();
     }
     surface() = boundary().surface();
+    // Initialize the last cells from the current cells
+    for (int j = 0; j < n_coord(); ++j) {
+      cell_last(j) = coord(j).cell();
+    }
     n_coord_last() = n_coord();
     n_coord() = boundary().coord_level();
     if (boundary().lattice_translation()[0] != 0 ||
diff --git a/src/volume_calc.cpp b/src/volume_calc.cpp
index 1deffb80488..3372b0274db 100644
--- a/src/volume_calc.cpp
+++ b/src/volume_calc.cpp
@@ -3,6 +3,7 @@
 #include "openmc/capi.h"
 #include "openmc/cell.h"
 #include "openmc/constants.h"
+#include "openmc/distribution_multi.h"
 #include "openmc/error.h"
 #include "openmc/geometry.h"
 #include "openmc/hdf5_interface.h"
@@ -12,6 +13,8 @@
 #include "openmc/nuclide.h"
 #include "openmc/openmp_interface.h"
 #include "openmc/output.h"
+#include "openmc/plot.h"
+#include "openmc/random_dist.h"
 #include "openmc/random_lcg.h"
 #include "openmc/settings.h"
 #include "openmc/timer.h"
@@ -33,7 +36,12 @@
 namespace openmc {

 namespace model {
 vector<VolumeCalculation> volume_calcs;
-}
+} // namespace model
+
+#ifdef OPENMC_MPI
+static MPI_Datatype mpi_vol_results;  //!< MPI struct for CalcResults
+static MPI_Datatype mpi_volume_tally; //!< MPI struct for VolTally
+#endif

//==============================================================================
// VolumeCalculation implementation
//==============================================================================
@@ -61,6 +69,10 @@ VolumeCalculation::VolumeCalculation(pugi::xml_node node)
   upper_right_ = get_node_array<double>(node, "upper_right");
   n_samples_ = std::stoull(get_node_value(node, "samples"));

+  // Determine volume of the bounding box
+  const Position d {upper_right_ - lower_left_};
+  volume_sample_ = d.x * d.y * d.z;
+
   if (check_for_node(node, "threshold")) {
     pugi::xml_node threshold_node = node.child("threshold");
@@ -74,14 +86,30 @@ VolumeCalculation::VolumeCalculation(pugi::xml_node node)
     std::string tmp = get_node_value(threshold_node, "type");
     if (tmp == "variance") {
       trigger_type_ = TriggerMetric::variance;
+      // t' = Var{\xi}/(N-1), in squared volume fraction units
+      threshold_cnd_ = threshold_ / std::pow(volume_sample_, 2);
     } else if (tmp == "std_dev") {
       trigger_type_ = TriggerMetric::standard_deviation;
+      // t' = Var{\xi}/(N-1), in squared volume fraction units
+      threshold_cnd_ = std::pow(threshold_ / volume_sample_, 2);
     } else if (tmp == "rel_err") {
       trigger_type_ = TriggerMetric::relative_error;
+      // t' = (Var{\xi}/(N-1)) / (E{\xi})^2 < t^2, in relative units
+      threshold_cnd_ = threshold_ * threshold_;
     } else {
       fatal_error(fmt::format(
         "Invalid volume calculation trigger type '{}' provided.", tmp));
     }
+
+    if (check_for_node(threshold_node, "max_iterations")) {
+      max_iterations_ =
+        std::stoi(get_node_value(threshold_node, "max_iterations"));
+      if (max_iterations_ <= 0) {
+        fatal_error(fmt::format(
+          "Invalid max_iterations value {} provided.", max_iterations_));
+      }
+    }
   }

   // Ensure there are no duplicates by copying elements to a set and then
@@ -91,10 +119,28 @@ VolumeCalculation::VolumeCalculation(pugi::xml_node node)
     throw std::runtime_error {"Domain IDs for a volume calculation "
                               "must be unique."};
   }
+
+  // Type of volume estimator
+  std::string tmp = get_node_value(node, "estimator_type");
+  if (tmp == "hit") {
+    mode_ = EstMode::REJECTION;
+  } else if (tmp == "ray") {
+    mode_ = EstMode::RAYTRACE;
+  } else {
+    fatal_error(
+      fmt::format("Invalid volume calculation mode '{}' provided.", tmp));
+  }
 }

-vector<VolumeCalculation::Result> VolumeCalculation::execute() const
+void VolumeCalculation::execute(CalcResults& master_results) const
 {
+#ifdef OPENMC_MPI
+  // MPI types are committed at the beginning of the calculation and freed on
+  // return; leaving an MPI type committed after return produces a MPICH
+  // memory-leak warning
+  this->initialize_MPI_struct();
+#endif
+
   // Check to make sure domain IDs are valid
   for (auto uid : domain_ids_) {
     switch (domain_type_) {
@@ -121,12 +167,7 @@ void VolumeCalculation::execute(CalcResults& master_results) const
   }

   // Shared data that is collected from all threads
-  int n = domain_ids_.size();
-  vector<vector<int>> master_indices(
-    n); // List of material indices for each domain
-  vector<vector<uint64_t>> master_hits(
-    n); // Number of hits for each material in each domain
-  int iterations = 0;
+  const int n_domains = domain_ids_.size();

   // Divide work over MPI processes
   uint64_t min_samples = n_samples_ / mpi::n_procs;
@@ -145,245 +186,237 @@ void VolumeCalculation::execute(CalcResults& master_results) const

#pragma omp parallel
   {
-    // Variables that are private to each thread
-    vector<vector<int>> indices(n);
-    vector<vector<uint64_t>> hits(n);
-    Particle p;
+    // Temporary variables that are private to each thread
+    CalcResults results(*this);
+    results.sampling_time.start();

-// Sample locations and count hits
+    // Sample locations and count scores
#pragma omp for
     for (size_t i = i_start; i < i_end; i++) {
-      uint64_t id = iterations * n_samples_ + i;
+      uint64_t id = master_results.iterations * n_samples_ + i;
       uint64_t seed = init_seed(id, STREAM_VOLUME);

-      p.n_coord() = 1;
-      Position xi {prn(&seed), prn(&seed), prn(&seed)};
-      p.r() = lower_left_ + xi * (upper_right_ - lower_left_);
-      p.u() = {1. / std::sqrt(3.), 1. / std::sqrt(3.), 1. / std::sqrt(3.)};
-
-      // If this location is not in the geometry at all, move on to next block
-      if (!exhaustive_find_cell(p))
-        continue;
-
-      if (domain_type_ == TallyDomain::MATERIAL) {
-        if (p.material() != MATERIAL_VOID) {
-          for (int i_domain = 0; i_domain < n; i_domain++) {
-            if (model::materials[p.material()]->id_ ==
-                domain_ids_[i_domain]) {
-              this->check_hit(
-                p.material(), indices[i_domain], hits[i_domain]);
-              break;
-            }
-          }
-        }
-      } else if (domain_type_ == TallyDomain::CELL) {
-        for (int level = 0; level < p.n_coord(); ++level) {
-          for (int i_domain = 0; i_domain < n; i_domain++) {
-            if (model::cells[p.coord(level).cell()]->id_ ==
-                domain_ids_[i_domain]) {
-              this->check_hit(
-                p.material(), indices[i_domain], hits[i_domain]);
-              break;
-            }
-          }
-        }
-      } else if (domain_type_ == TallyDomain::UNIVERSE) {
-        for (int level = 0; level < p.n_coord(); ++level) {
-          for (int i_domain = 0; i_domain < n; ++i_domain) {
-            if (model::universes[p.coord(level).universe()]->id_ ==
-                domain_ids_[i_domain]) {
-              check_hit(p.material(), indices[i_domain], hits[i_domain]);
-              break;
-            }
-          }
-        }
-      }
-    }
+      Position r {uniform_distribution(lower_left_.x, upper_right_.x, &seed),
+        uniform_distribution(lower_left_.y, upper_right_.y, &seed),
+        uniform_distribution(lower_left_.z, upper_right_.z, &seed)};

-    // At this point, each thread has its own pair of index/hits lists and we
-    // now need to reduce them. OpenMP is not nearly smart enough to do this
-    // on its own, so we have to manually reduce them
-    for (int i_domain = 0; i_domain < n; ++i_domain) {
-      reduce_indices_hits(indices[i_domain], hits[i_domain],
-        master_indices[i_domain], master_hits[i_domain]);
-    }
-  } // omp parallel
+      results.n_samples++;

-  // Reduce hits onto master process
+      switch (mode_) {
+      case EstMode::REJECTION: {
+        const double flt3 = 1. / std::sqrt(3.);
+        Direction u {flt3, flt3, flt3};

-  // Determine volume of bounding box
-  Position d {upper_right_ - lower_left_};
-  double volume_sample = d.x * d.y * d.z;
+        // Create a zero-length ray; this is a bit wasteful because unneeded
+        // internal ray variables are initialized as well
+        VolEstRay ray_hit(r, u, 0., 1., *this, results);
+        ray_hit.score_hit();
+      } break;
+      case EstMode::RAYTRACE: {
+        Direction u = isotropic_direction(&seed);

-  // bump iteration counter and get total number
-  // of samples at this point
-  iterations++;
-  uint64_t total_samples = iterations * n_samples_;
+        // Compute the lengths of the bounding box's chord segments
+        const auto chord_len = get_box_chord(r, u);
+        const double ch_len_tot = -chord_len.first + chord_len.second;

-  // warn user if total sample size is greater than what the uin64_t type can
-  // represent
-  if (total_samples == std::numeric_limits<uint64_t>::max()) {
-    warning("The number of samples has exceeded the type used to track hits. "
-            "Volume results may be inaccurate.");
-  }
+        r += chord_len.first * u;
+        VolEstRay ray(r, u, ch_len_tot, 1. / ch_len_tot, *this, results);
+        ray.trace(); // Trace from one boundary to the other
+      }
+      }

-  // reset
-  double trigger_val = -INFTY;
+      // This pass over all tallies after each sample can be inefficient when
+      // the number of domains is large and the batch size is small, but the
+      // batch size is currently fixed at 1 to keep the input format unchanged
+      const double batch_size_1 = 1. / static_cast<double>(1);
+      for (auto& vt : results.vol_tallies) {
+        for (auto& vol_tally : vt) {
+          vol_tally.finalize_batch(batch_size_1);
+        }
+      }
+    } // sample/batch loop

-  // Set size for members of the Result struct
-  vector<Result> results(n);
+    results.sampling_time.stop();
+    results.cost = results.sampling_time.elapsed();

-  for (int i_domain = 0; i_domain < n; ++i_domain) {
-    // Get reference to result for this domain
-    auto& result {results[i_domain]};
+    // At this point, each thread has its own volume tally lists and we
+    // now need to reduce them. OpenMP is not nearly smart enough to do this
+    // on its own, so we have to manually reduce them
+    reduce_results(results, master_results);
+  } // omp parallel

-    // Create 2D array to store atoms/uncertainty for each nuclide. Later this
-    // is compressed into vectors storing only those nuclides that are
-    // non-zero
-    auto n_nuc =
-      settings::run_CE ? data::nuclides.size() : data::mg.nuclides_.size();
-    xt::xtensor<double, 2> atoms({n_nuc, 2}, 0.0);
+  // bump iteration counter
+  master_results.iterations++;

#ifdef OPENMC_MPI
-    if (mpi::master) {
-      for (int j = 1; j < mpi::n_procs; j++) {
-        int q;
-        // retrieve results
-        MPI_Recv(
-          &q, 1, MPI_UINT64_T, j, 2 * j, mpi::intracomm, MPI_STATUS_IGNORE);
-        vector<uint64_t> buffer(2 * q);
-        MPI_Recv(buffer.data(), 2 * q, MPI_UINT64_T, j, 2 * j + 1,
-          mpi::intracomm, MPI_STATUS_IGNORE);
-        for (int k = 0; k < q; ++k) {
-          bool already_added = false;
-          for (int m = 0; m < master_indices[i_domain].size(); ++m) {
-            if (buffer[2 * k] == master_indices[i_domain][m]) {
-              master_hits[i_domain][m] += buffer[2 * k + 1];
-              already_added = true;
-              break;
-            }
-          }
-          if (!already_added) {
-            master_indices[i_domain].push_back(buffer[2 * k]);
-            master_hits[i_domain].push_back(buffer[2 * k + 1]);
-          }
-        }
-      }
-    } else {
-      int q = master_indices[i_domain].size();
-      vector<uint64_t> buffer(2 * q);
-      for (int k = 0; k < q; ++k) {
-        buffer[2 * k] = master_indices[i_domain][k];
-        buffer[2 * k + 1] = master_hits[i_domain][k];
-      }
-
-      MPI_Send(&q, 1, MPI_UINT64_T, 0, 2 * mpi::rank, mpi::intracomm);
-      MPI_Send(buffer.data(), 2 * q, MPI_UINT64_T, 0, 2 * mpi::rank + 1,
-        mpi::intracomm);
+  master_results.collect_MPI(); // collect results onto the master process
+#endif
+  // Evaluate the trigger state on the master process
+  bool stop_calc =
+    mpi::master && (trigger_type_ == TriggerMetric::not_active ||
+                     master_results.iterations == max_iterations_);
+
+  if (!stop_calc) {
+    // Compute the current trigger state among the totals (0th elements) only
+    for (const auto& vt : master_results.vol_tallies) {
+      stop_calc = vt[0].trigger_state(
+        trigger_type_, threshold_cnd_, master_results.n_samples);
+      if (!stop_calc)
+        break;
     }
+  }
+
+#ifdef OPENMC_MPI
+  // Broadcast the stop decision just made on the master process to all
+  // processes
+  MPI_Bcast(&stop_calc, 1, MPI_CXX_BOOL, 0, mpi::intracomm);
#endif

-    if (mpi::master) {
-      size_t total_hits = 0;
-      for (int j = 0; j < master_indices[i_domain].size(); ++j) {
-        total_hits += master_hits[i_domain][j];
-        double f =
-          static_cast<double>(master_hits[i_domain][j]) / total_samples;
-        double var_f = f * (1.0 - f) / total_samples;
+  if (!stop_calc)
+    continue; // while loop
+  // No trigger is applied or the trigger condition is satisfied; we're
+  // done
+
+  if (mpi::master) {
+    // Normalize all results by the bounding primitive volume and compute
+    // the stddev
+    for (auto& vt : master_results.vol_tallies) {
+      for (auto& vol_tally : vt) {
+        vol_tally.score_acc = get_tally_results(
+          master_results.n_samples, volume_sample_, vol_tally);
+      }
+    }

-        int i_material = master_indices[i_domain][j];
-        if (i_material == MATERIAL_VOID)
+    // Compute nuclides
+    for (int i_domain = 0; i_domain < n_domains; ++i_domain) {
+      // Create 2D array to store atoms/uncertainty for each nuclide. Later
+      // this is compressed into vectors storing only those nuclides that are
+      // non-zero
+      auto n_nuc =
+        settings::run_CE ? data::nuclides.size() : data::mg.nuclides_.size();
+      xt::xtensor<double, 2> atoms({n_nuc, 2}, 0.0);
+
+      for (int j = 0; j < master_results.vol_tallies[i_domain].size(); ++j) {
+        const int i_material = master_results.vol_tallies[i_domain][j].index;
+        if (i_material == MATERIAL_VOID || i_material == _INDEX_TOTAL)
           continue;

         const auto& mat = model::materials[i_material];
         for (int k = 0; k < mat->nuclide_.size(); ++k) {
-          // Accumulate nuclide density
-          int i_nuclide = mat->nuclide_[k];
-          atoms(i_nuclide, 0) += mat->atom_density_[k] * f;
-          atoms(i_nuclide, 1) += std::pow(mat->atom_density_[k], 2) * var_f;
-        }
-      }
-
-      // Determine volume
-      result.volume[0] =
-        static_cast<double>(total_hits) / total_samples * volume_sample;
-      result.volume[1] =
-        std::sqrt(result.volume[0] * (volume_sample - result.volume[0]) /
-                  total_samples);
-      result.iterations = iterations;
-
-      // update threshold value if needed
-      if (trigger_type_ != TriggerMetric::not_active) {
-        double val = 0.0;
-        switch (trigger_type_) {
-        case TriggerMetric::standard_deviation:
-          val = result.volume[1];
-          break;
-        case TriggerMetric::relative_error:
-          val = result.volume[0] == 0.0 ? INFTY
-                                        : result.volume[1] / result.volume[0];
-          break;
-        case TriggerMetric::variance:
-          val = result.volume[1] * result.volume[1];
-          break;
-        default:
-          break;
-        }
-        // update max if entry is valid
-        if (val > 0.0) {
-          trigger_val = std::max(trigger_val, val);
+          auto& volume = master_results.vol_tallies[i_domain][j].score_acc;
+          // Collect calculated nuclide amounts N [atoms] and stddev as
+          // N = V [cm^3] * \rho [atoms/b-cm] * 1.e24 [b-cm/cm^3]
+          atoms(mat->nuclide_[k], 0) +=
+            volume[0] * mat->atom_density(k) * 1.0e24;
+          atoms(mat->nuclide_[k], 1) +=
+            volume[1] * mat->atom_density(k) * 1.0e24;
         }
       }

+      // Get reference to result for this domain
+      auto& result {master_results.nuc_results[i_domain]};
+
+      // Convert full arrays to vectors
       for (int j = 0; j < n_nuc; ++j) {
-        // Determine total number of atoms. At this point, we have values in
-        // atoms/b-cm. To get to atoms we multiply by 10^24 V.
-        double mean = 1.0e24 * volume_sample * atoms(j, 0);
-        double stdev = 1.0e24 * volume_sample * std::sqrt(atoms(j, 1));
-
-        // Convert full arrays to vectors
-        if (mean > 0.0) {
+        if (atoms(j, 0) > 0.0) {
           result.nuclides.push_back(j);
-          result.atoms.push_back(mean);
-          result.uncertainty.push_back(stdev);
+          result.atoms.push_back(atoms(j, 0));
+          result.uncertainty.push_back(atoms(j, 1));
         }
       }
-    }
-  } // end domain loop
-
-  // if no trigger is applied, we're done
-  if (trigger_type_ == TriggerMetric::not_active) {
-    return results;
+    } // end domains loop
   }

#ifdef OPENMC_MPI
-  // update maximum error value on all processes
-  MPI_Bcast(&trigger_val, 1, MPI_DOUBLE, 0, mpi::intracomm);
+  // MPI types are committed at the beginning of the calculation and freed on
+  // return; leaving MPI types committed after return produces MPICH warnings
+  // about a memory leak
+  this->delete_MPI_struct();
#endif

-  // return results of the calculation
-  if (trigger_val < threshold_) {
-    return results;
-  }
+  return;

-#ifdef OPENMC_MPI
-  // if iterating in an MPI run, need to zero indices and hits so they aren't
-  // counted twice
-  if (!mpi::master) {
-    for (auto& v : master_indices) {
-      std::fill(v.begin(), v.end(), 0);
-    }
-    for (auto& v : master_hits) {
-      std::fill(v.begin(), v.end(), 0);
-    }
-  }
-#endif
-
   } // end while
+}
+
+void VolumeCalculation::show_vol_stat(
+  const std::string label, const std::string units, const double value) const
+{
+  fmt::print("{0:<20} = {2:10.4e} {1:<}\n", label, units, value);
+}
+
+void VolumeCalculation::show_volume(const std::string domain_type,
+  const int domain_id, const std::string region_name, const double mean,
+  const double stddev) const
+{
+  fmt::print(" {0:>9}{1:>6}: {2:10.4e} +/- {3:10.4e} cm^3", domain_type,
+    domain_id, mean, stddev);
+  if (!region_name.empty()) {
+    fmt::print(" //{:<}", region_name);
+  }
+  fmt::print("\n");
+}
+
+void VolumeCalculation::show_results(const CalcResults& results) const
+{
+  // Show tracing statistics
+  write_message(5, " ");
+  show_vol_stat(
+    "Total sample size", "", static_cast<double>(results.n_samples));
+  show_vol_stat("Running cost", "thread-sec", results.cost);
+
+  switch (mode_) {
+  case EstMode::REJECTION:
+    show_vol_stat("Cost of hitting", "thread-sec/hit",
+      results.cost / static_cast<double>(results.n_samples));
+    break;
+  case EstMode::RAYTRACE:
+    show_vol_stat("Rays traced", "", results.n_rays);
+    show_vol_stat("Average ray length", "segments/ray",
+      static_cast<double>(results.n_segs) /
+        static_cast<double>(results.n_rays));
+    show_vol_stat("Cost of tracing", "thread-sec/segment",
+      results.cost / static_cast<double>(results.n_segs));
+    if (results.n_errors != 0)
+      show_vol_stat("Error rate", "errors/ray",
+        static_cast<double>(results.n_errors) /
+          static_cast<double>(results.n_rays));
+  }
+  write_message(5, " ");
+
+  std::string domain_type;
+  if (domain_type_ == TallyDomain::CELL) {
+    domain_type = "Cell";
+  } else if (domain_type_ == TallyDomain::MATERIAL) {
+    domain_type = "Material";
+  } else {
+    domain_type = "Universe";
+  }
+
+  // Display domain volumes
+  for (int j = 0; j < domain_ids_.size(); j++) {
+    std::string region_name {""};
+    if (domain_type_ == TallyDomain::CELL) {
+      int cell_idx = model::cell_map[domain_ids_[j]];
+      region_name = model::cells[cell_idx]->name();
+    } else if (domain_type_ == TallyDomain::MATERIAL) {
+      int mat_idx = model::material_map[domain_ids_[j]];
+      region_name = model::materials[mat_idx]->name();
+    }
+    if (region_name.size())
+      region_name.insert(0, " "); // prepend a space for formatting
+
+    show_volume(domain_type, domain_ids_[j], region_name,
+      results.vol_tallies[j][0].score_acc[0],
+      results.vol_tallies[j][0].score_acc[1]);
+  }
+  write_message(4, " "); // Blank line after the results are printed
 }

 void VolumeCalculation::to_hdf5(
-  const std::string& filename, const vector<Result>& results) const
+  const std::string& filename, const CalcResults& results) const
 {
   // Create HDF5 file
   hid_t file_id = file_open(filename, 'w');
@@ -403,9 +436,20 @@ void VolumeCalculation::to_hdf5(
   write_attribute(file_id, "samples", n_samples_);
   write_attribute(file_id, "lower_left", lower_left_);
   write_attribute(file_id, "upper_right", upper_right_);
+  // Write estimator info
+  std::string estimator_str;
+  switch (mode_) {
+  case EstMode::REJECTION:
+    estimator_str = "hit";
+    break;
+  case EstMode::RAYTRACE:
+    estimator_str = "ray";
+    break;
+  }
+  write_attribute(file_id, "estimator_type", estimator_str);
   // Write trigger info
   if (trigger_type_ != TriggerMetric::not_active) {
-    write_attribute(file_id, "iterations", results[0].iterations);
+    write_attribute(file_id, "iterations", results.iterations);
     write_attribute(file_id, "threshold", threshold_);
     std::string trigger_str;
     switch (trigger_type_) {
@@ -422,6 +466,10 @@ void VolumeCalculation::to_hdf5(
       break;
     }
     write_attribute(file_id, "trigger_type", trigger_str);
+    // Only write max_iterations if it differs from the default value
+    if (max_iterations_ != std::numeric_limits<int>::max())
+      write_attribute(file_id, "max_iterations", max_iterations_);
   } else {
     write_attribute(file_id, "iterations", 1);
   }
@@ -439,8 +487,8 @@ void VolumeCalculation::to_hdf5(
       create_group(file_id, fmt::format("domain_{}", domain_ids_[i]));

     // Write volume for domain
-    const auto& result {results[i]};
-    write_dataset(group_id, "volume", result.volume);
+    const auto& result {results.nuc_results[i]};
+    write_dataset(group_id, "volume", results.vol_tallies[i][0].score_acc);

     // Create array of nuclide names from the vector
     auto n_nuc = result.nuclides.size();
@@ -466,24 +514,425 @@ void VolumeCalculation::to_hdf5(
   file_close(file_id);
 }

-void VolumeCalculation::check_hit(
-  int i_material, vector<int>& indices, vector<uint64_t>& hits) const
+void VolumeCalculation::check_hit(const int32_t i_material,
+  const double contrib, vector<VolTally>& vol_tallies) const
 {
-
-  // Check if this material was previously hit and if so, increment count
-  bool already_hit = false;
-  for (int j = 0; j < indices.size(); j++) {
-    if (indices[j] == i_material) {
-      hits[j]++;
-      already_hit = true;
+  // Contribute to the entire-domain result tally
+  vol_tallies[0].score += contrib;
+
+  // Check if this material was previously hit and, if so, contribute the score
+  for (int j = 1; j < vol_tallies.size(); j++) {
+    if (vol_tallies[j].index == i_material) {
+      vol_tallies[j].score += contrib;
+      return;
     }
   }

-  // If the material was not previously hit, append an entry to the material
+  // The material was not previously hit; append an entry to the material
   // indices and hits lists
-  if (!already_hit) {
-    indices.push_back(i_material);
-    hits.push_back(1);
+  vol_tallies.push_back(VolTally(i_material, contrib));
+}
+
+void VolumeCalculation::reduce_results(
+  const CalcResults& local_results, CalcResults& results) const
+{
+  auto n_threads = num_threads();
+
+  // Collect scalar variables
+#pragma omp for ordered schedule(static, 1)
+  for (int i = 0; i < n_threads; ++i) {
+#pragma omp ordered
+    {
+      results.n_samples += local_results.n_samples;
+      results.n_rays += local_results.n_rays;
+      results.n_segs += local_results.n_segs;
+      results.n_errors += local_results.n_errors;
+      results.cost += local_results.cost;
+    }
+  }
+
+  // Collect vectored domain-wise results
+  for (int i_domain = 0; i_domain < domain_ids_.size(); ++i_domain) {
+    const vector<VolTally>& local_vol_tall =
+      local_results.vol_tallies[i_domain];
+    vector<VolTally>& vol_tall = results.vol_tallies[i_domain];
+
+#pragma omp for ordered schedule(static, 1)
+    for (int i = 0; i < n_threads; ++i) {
+#pragma omp ordered
+      for (int j = 0; j < local_vol_tall.size(); ++j) {
+        // Check if this material has been added to the master list and, if
+        // so, accumulate the scores
+        const auto ind {local_vol_tall[j].index};
+        const auto it = std::find_if(vol_tall.begin(), vol_tall.end(),
+          [ind](const VolTally& vt) { return vt.index == ind; });
+        if (it == vol_tall.end()) {
+          vol_tall.push_back(local_vol_tall[j]);
+        } else {
+          vol_tall[it - vol_tall.begin()].append_tally(local_vol_tall[j]);
+        }
+      }
+    }
+  }
+}
+
+#ifdef OPENMC_MPI
+void VolumeCalculation::initialize_MPI_struct() const
+{
+  // This code is a slightly modified replica of initialize_mpi() from
+  // initialize.cpp. It works under GCC in the Release configuration, but it is
+  // not guaranteed that the address offsets of the structure's members are the
+  // same everywhere when an optimizing compiler is used.
+  CalcResults cr(*this);
+  MPI_Aint cr_disp[5], cr_d;
+  MPI_Get_address(&cr, &cr_d);
+  MPI_Get_address(&cr.n_errors, &cr_disp[0]);
+  MPI_Get_address(&cr.n_rays, &cr_disp[1]);
+  MPI_Get_address(&cr.n_segs, &cr_disp[2]);
+  MPI_Get_address(&cr.n_samples, &cr_disp[3]);
+  MPI_Get_address(&cr.cost, &cr_disp[4]);
+  for (int i = 0; i < 5; i++) {
+    cr_disp[i] -= cr_d;
+  }
+
+  int cr_blocks[] {1, 1, 1, 1, 1};
+  MPI_Datatype cr_types[] {
+    MPI_UINT64_T, MPI_UINT64_T, MPI_UINT64_T, MPI_UINT64_T, MPI_DOUBLE};
+  MPI_Type_create_struct(5, cr_blocks, cr_disp, cr_types, &mpi_vol_results);
+  MPI_Type_commit(&mpi_vol_results);
+
+  VolTally vt;
+  MPI_Aint vt_disp[3], vt_d;
+  MPI_Get_address(&vt, &vt_d);
+  MPI_Get_address(&vt.score, &vt_disp[0]);
+  MPI_Get_address(&vt.score_acc, &vt_disp[1]);
+  MPI_Get_address(&vt.index, &vt_disp[2]);
+  for (int i = 0; i < 3; i++) {
+    vt_disp[i] -= vt_d;
+  }
+
+  // Three blocks (score, the two score_acc entries, index) need exactly three
+  // datatypes
+  int vt_blocks[] {1, 2, 1};
+  MPI_Datatype vt_types[] {MPI_DOUBLE, MPI_DOUBLE, MPI_INT};
+  MPI_Type_create_struct(3, vt_blocks, vt_disp, vt_types, &mpi_volume_tally);
+  MPI_Type_commit(&mpi_volume_tally);
+}
+
+void VolumeCalculation::delete_MPI_struct() const
+{
+  MPI_Type_free(&mpi_vol_results);
+  MPI_Type_free(&mpi_volume_tally);
+}
+
+void VolumeCalculation::CalcResults::collect_MPI()
+{
+  vector<int> domain_sizes(vol_tallies.size());
+
+  if (!mpi::master) {
+    // n_domain + 2 MPI messages will be sent in total to rank mpi::master
+
+    // Determine a unique tag for each MPI message (matched on the receive
+    // side below)
+    int mpi_offset = mpi::rank * (vol_tallies.size() + 2) + 2;
+
+    // Pass the root data of the struct
+    MPI_Send(
+      (void*)this, 1, mpi_vol_results, 0, mpi_offset - 2, mpi::intracomm);
+
+    // Pass the sizes of the domain-wise data
+    for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
+      domain_sizes[i_domain] = vol_tallies[i_domain].size();
+    }
+
+    MPI_Send(domain_sizes.data(), domain_sizes.size(), MPI_INT, 0,
+      mpi_offset - 1, mpi::intracomm);
+
+    // Pass the domain-wise data of the struct
+    for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
+      MPI_Send(vol_tallies[i_domain].data(), domain_sizes[i_domain],
+        mpi_volume_tally, 0, mpi_offset + i_domain, mpi::intracomm);
+    }
+
+    this->reset(); // Clear the data just sent to the master process
+
+  } else {
+
+    // n_domain + 2 MPI messages will be received in total on rank mpi::master
+
+    for (int i_proc = 1; i_proc < mpi::n_procs; i_proc++) {
+
+      CalcResults res_buff(*this); // temporary storage for the received data
+
+      // Determine a unique tag for each MPI message (matching the send side
+      // above)
+      int mpi_offset = i_proc * (vol_tallies.size() + 2) + 2;
+
+      // Receive the root data of the struct
+      MPI_Recv(&res_buff, 1, mpi_vol_results, i_proc, mpi_offset - 2,
+        mpi::intracomm, MPI_STATUS_IGNORE);
+
+      // Receive the sizes of the domain-wise data
+      MPI_Recv(domain_sizes.data(), domain_sizes.size(), MPI_INT, i_proc,
+        mpi_offset - 1, mpi::intracomm, MPI_STATUS_IGNORE);
+
+      // Receive the domain-wise data of the struct
+      for (int i_domain = 0; i_domain < vol_tallies.size(); i_domain++) {
+        res_buff.vol_tallies[i_domain].resize(domain_sizes[i_domain]);
+        MPI_Recv(res_buff.vol_tallies[i_domain].data(), domain_sizes[i_domain],
+          mpi_volume_tally, i_proc, mpi_offset + i_domain, mpi::intracomm,
+          MPI_STATUS_IGNORE);
+      }
+
+      this->append(res_buff);
+    }
+  }
+}
+#endif
+
+VolumeCalculation::CalcResults::CalcResults(const VolumeCalculation& vol_calc)
+{
+  n_samples = 0;
+  n_rays = 0;
+  n_segs = 0;
+  n_errors = 0;
+  iterations = 0;
+  cost = 0.;
+  for (int i = 0; i < vol_calc.domain_ids_.size(); i++) {
+    vector<VolTally> vt_vect; // Tally group for a domain
+    vt_vect.push_back(
+      VolTally(_INDEX_TOTAL)); // Zero-element tally for entire-domain totals
+    vol_tallies.push_back(vt_vect);
+    nuc_results.push_back(NuclResult());
+  }
+}
+
+void VolumeCalculation::CalcResults::reset()
+{
+  n_samples = 0;
+  n_rays = 0;
+  n_segs = 0;
+  n_errors = 0;
+  cost = 0.;
+  for (auto& vt : vol_tallies) {
+    for (auto& vol_tally : vt) {
+      // Zero the scores but keep the material indices so that later merges
+      // by index remain consistent
+      vol_tally = VolTally(vol_tally.index);
+    }
+  }
+}
+
+void VolumeCalculation::CalcResults::append(const CalcResults& other)
+{
+  n_samples += other.n_samples;
+  n_rays += other.n_rays;
+  n_segs += other.n_segs;
+  n_errors += other.n_errors;
+  cost += other.cost;
+
+  // The domain-wise vectors this->vol_tallies and other.vol_tallies
+  // correspond to each other by construction
+  for (auto id = 0; id < vol_tallies.size(); id++) {
+    // Merge the current domain vector from other.vol_tallies into this one
+    // via pair-wise comparisons
+    for (const auto& vt_other : other.vol_tallies[id]) {
+      bool already_appended = false;
+      for (auto& vt : vol_tallies[id]) {
+        if (vt.index == vt_other.index) {
+          vt.append_tally(vt_other);
+          already_appended = true;
+          break;
+        }
+      }
+      if (!already_appended)
+        vol_tallies[id].push_back(vt_other);
+    }
+  }
+}
+
+inline VolumeCalculation::VolTally::VolTally(const int i_material,
+  const double contrib, const double score_acc_, const double score2_acc_)
+{
+  score = contrib;
+  score_acc[0] = score_acc_;
+  score_acc[1] = score2_acc_;
+  index = i_material;
+}
+
+inline void VolumeCalculation::VolTally::finalize_batch(
+  const double batch_size_1)
+{
+  if (score != 0.) {
+    score *= batch_size_1;
+    score_acc[0] += score;
+    score_acc[1] += score * score;
+    score = 0.;
+  }
+}
+
+inline void VolumeCalculation::VolTally::assign_tally(const VolTally& vol_tally)
+{
+  score = vol_tally.score;
+  score_acc = vol_tally.score_acc;
+  index = vol_tally.index;
+}
+
+inline void VolumeCalculation::VolTally::append_tally(const VolTally& vol_tally)
+{
+  score += vol_tally.score;
+  score_acc[0] += vol_tally.score_acc[0];
+  score_acc[1] += vol_tally.score_acc[1];
+}
+
+inline bool VolumeCalculation::VolTally::trigger_state(
+  const TriggerMetric trigger_type, const double threshold,
+  const size_t& n_samples) const
+{
+  // With each sample's contribution to the volume fraction bounded by 1, the
+  // largest n_samples that avoids double overflow below is around 1.e102,
+  // which is still much larger than the size_t limit of ~1.8e19
+  const double ns1 = static_cast<double>(n_samples - 1);
+  const double ns = static_cast<double>(n_samples);
+  const double mean_xi_sq = score_acc[0] * score_acc[0];
+
+  // score_acc[0] = N * E[\xi], score_acc[1] = N * E[\xi^2]
+  switch (trigger_type) {
+  case TriggerMetric::variance:
+  case TriggerMetric::standard_deviation:
+    // N^2 * Var[\xi] < t' * (N-1) * N^2
+    return score_acc[1] * ns - mean_xi_sq < threshold * ns1 * ns * ns;
+  case TriggerMetric::relative_error:
+    // N^2 * Var[\xi] < t' * (N-1) * (N * E[\xi])^2
+    return score_acc[1] * ns - mean_xi_sq < threshold * ns1 * mean_xi_sq;
+  default:
+    return true;
+  }
+}
+
+array<double, 2> VolumeCalculation::get_tally_results(const size_t& n_samples,
+  const double coeff_norm, const VolTally& vol_tally) const
+{
+  array<double, 2> volume;
+  const double ns_1 = 1. / static_cast<double>(n_samples);
+  volume[0] = vol_tally.score_acc[0] * ns_1;
+  volume[1] = vol_tally.score_acc[1] * ns_1;
+  volume[1] = std::sqrt(
+    (volume[1] - volume[0] * volume[0]) / static_cast<double>(n_samples - 1));
+  volume[0] *= coeff_norm;
+  volume[1] *= coeff_norm;
+  return volume;
+}
+
+std::pair<double, double> VolumeCalculation::get_box_chord(
+  const Position& r, const Direction& u) const
+{
+  // Compute the distances to each box plane orthogonal to an axis
+  Direction u_1 = {1., 1., 1.};
+  u_1 = u_1 / u;
+  Position xi = (lower_left_ - r) * u_1;
+  const array<double, 3> dist1 = {xi.x, xi.y, xi.z};
+  xi = (upper_right_ - r) * u_1;
+  const array<double, 3> dist2 = {xi.x, xi.y, xi.z};
+
+  // Find the minimal forward (positive values) and backward (negative values)
+  // distances among the computed ones (probably there is some STL
+  // alternative)
+  std::pair<double, double> chord_lengths {std::minmax(dist1[0], dist2[0])};
+  for (int i = 1; i < 3; i++) {
+    const std::pair<double, double> dist_mm = std::minmax(dist1[i], dist2[i]);
+    chord_lengths.first = std::max(chord_lengths.first, dist_mm.first);
+    chord_lengths.second = std::min(chord_lengths.second, dist_mm.second);
+  }
+  return chord_lengths;
+}
+
+void VolEstRay::on_intersection()
+{
+  if (traversal_distance_ == 0.) {
+    return; // The ray did not cross the model
+  }
+
+  results_.n_segs++;
+  if (traversal_distance_last_ == 0.) {
+    results_.n_rays++; // First segment of a new ray
+  }
+
+  // At this point, the current GeometryState parameters represent the cell
+  // behind the crossed surface, but the segment length is known for the
+  // previous cell only; therefore we use the last cell kept in GeometryState
+  const auto score = (std::min(traversal_distance_, traversal_distance_max_) -
+                       traversal_distance_last_) *
+                     coeff_mult_;
+
+  //----------------------------------------------------------------------------
+  // Tracing error diagnostics
+  // TODO: clever diagnostics and guidance could be implemented here to help
+  // the user fix input mistakes
+  //----------------------------------------------------------------------------
+
+  if (traversal_distance_ >= traversal_distance_max_) {
+    stop();
+  } else {
+    traversal_distance_last_ = traversal_distance_;
+  }
+
+  // In the case of a single-segment ray (leakage after the first surface
+  // crossing), the current segment's material ID seems to be contained in
+  // material(), but in other cases it is in material_last(). Due to this
+  // unclear behavior, a more fundamental way of determining the material is
+  // used below -- via the stable last-cell record.
+  vol_scoring(VolumeCalculation::EstMode::RAYTRACE, score,
+    (current_cell(VolumeCalculation::EstMode::RAYTRACE, n_coord_last() - 1)
+        ->material(n_coord_last() - 1)));
+}
+
+void VolEstRay::score_hit()
+{
+  if (exhaustive_find_cell(*this))
+    vol_scoring(VolumeCalculation::EstMode::REJECTION, 1.,
+      material()); // Score one hit
+}
+
+void VolEstRay::vol_scoring(
+  const VolumeCalculation::EstMode mode, const double score, const int id_mat)
+{
+  const auto n_domains = vol_calc_.domain_ids_.size();
+
+  switch (vol_calc_.domain_type_) {
+  case VolumeCalculation::TallyDomain::MATERIAL:
+    if (id_mat != MATERIAL_VOID) {
+      for (auto i_domain = 0; i_domain < n_domains; i_domain++) {
+        if (model::materials[id_mat]->id_ == vol_calc_.domain_ids_[i_domain]) {
+          vol_calc_.check_hit(id_mat, score, results_.vol_tallies[i_domain]);
+          break;
+        }
+      }
+    }
+    break;
+  case VolumeCalculation::TallyDomain::CELL:
+    for (auto level = 0; level < n_coord_last(); ++level) {
+      for (auto i_domain = 0; i_domain < n_domains; i_domain++) {
+        if (current_cell(mode, level)->id_ == vol_calc_.domain_ids_[i_domain]) {
+          vol_calc_.check_hit(id_mat, score, results_.vol_tallies[i_domain]);
+          break;
+        }
+      }
+    }
+    break;
+  case VolumeCalculation::TallyDomain::UNIVERSE:
+    for (auto level = 0; level < n_coord_last(); ++level) {
+      for (auto i_domain = 0; i_domain < n_domains; ++i_domain) {
+        if (model::universes[current_cell(mode, level)->universe_]->id_ ==
+            vol_calc_.domain_ids_[i_domain]) {
+          vol_calc_.check_hit(id_mat, score, results_.vol_tallies[i_domain]);
+          break;
+        }
+      }
+    }
+  }
+}
+
+inline std::unique_ptr<Cell>& VolEstRay::current_cell(
+  VolumeCalculation::EstMode mode, int level)
+{
+  switch (mode) {
+  case VolumeCalculation::EstMode::REJECTION:
+    return model::cells[coord(level).cell()]; // Current position
+  case VolumeCalculation::EstMode::RAYTRACE:
+    return model::cells[cell_last(level)]; // Previous segment
+  }
+}
@@ -514,43 +963,18 @@ int openmc_calculate_volumes()

     // Run volume calculation
     const auto& vol_calc {model::volume_calcs[i]};
-    std::vector<VolumeCalculation::Result> results;
+    VolumeCalculation::CalcResults results(vol_calc);
     try {
-      results = vol_calc.execute();
+      vol_calc.execute(results);
     } catch (const std::exception& e) {
       set_errmsg(e.what());
       return OPENMC_E_UNASSIGNED;
     }

     if (mpi::master) {
-      std::string domain_type;
-      if (vol_calc.domain_type_ == VolumeCalculation::TallyDomain::CELL) {
-        domain_type = " Cell ";
-      } else if (vol_calc.domain_type_ ==
-                 VolumeCalculation::TallyDomain::MATERIAL) {
-        domain_type = " Material ";
-      } else {
-        domain_type = " Universe ";
-      }
-      // Display domain volumes
-      for (int j = 0; j < vol_calc.domain_ids_.size(); j++) {
-        std::string region_name {""};
-        if (vol_calc.domain_type_ == VolumeCalculation::TallyDomain::CELL) {
-          int cell_idx = model::cell_map[vol_calc.domain_ids_[j]];
-          region_name = model::cells[cell_idx]->name();
-        } else if (vol_calc.domain_type_ ==
-                   VolumeCalculation::TallyDomain::MATERIAL) {
-          int mat_idx = model::material_map[vol_calc.domain_ids_[j]];
-          region_name = model::materials[mat_idx]->name();
-        }
-        if (region_name.size())
-          region_name.insert(0, " "); // prepend space for formatting
-
-        write_message(4, "{}{}{}: {} +/- {} cm^3", domain_type,
-          vol_calc.domain_ids_[j], region_name, results[j].volume[0],
-          results[j].volume[1]);
-      }
+      // Output volume calculation results and statistics
+      vol_calc.show_results(results);

       // Write volumes to HDF5 file
       std::string filename =
@@ -561,7 +985,7 @@ int openmc_calculate_volumes()

   // Show elapsed time
   time_volume.stop();
-  write_message(6, "Elapsed time: {} s", time_volume.elapsed());
+  write_message(6, "Elapsed time: {} sec", time_volume.elapsed());

   return 0;
 }
diff --git a/tests/regression_tests/volume_calc/inputs_true.dat b/tests/regression_tests/volume_calc/inputs_true.dat
index ed7024c5772..402ab17ecd7 100644
--- a/tests/regression_tests/volume_calc/inputs_true.dat
+++ b/tests/regression_tests/volume_calc/inputs_true.dat
@@ -17,7 +17,7 @@
-
+
@@ -32,6 +32,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
    <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>material</domain_type>
@@ -39,6 +40,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>universe</domain_type>
@@ -46,6 +48,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>cell</domain_type>
@@ -54,6 +57,7 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="std_dev" threshold="0.1"/>
+    <estimator_type>ray</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>material</domain_type>
@@ -62,6 +66,7 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="rel_err" threshold="0.1"/>
+    <estimator_type>ray</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>cell</domain_type>
@@ -70,6 +75,16 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="variance" threshold="0.05"/>
+    <estimator_type>ray</estimator_type>
+  </volume_calc>
+  <volume_calc>
+    <domain_type>material</domain_type>
+    <domain_ids>1 2</domain_ids>
+    <samples>100</samples>
+    <lower_left>-1.0 -1.0 -6.0</lower_left>
+    <upper_right>1.0 1.0 6.0</upper_right>
+    <threshold type="rel_err" threshold="1e-10" max_iterations="9"/>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
diff --git a/tests/regression_tests/volume_calc/inputs_true_mg.dat b/tests/regression_tests/volume_calc/inputs_true_mg.dat
index 127566084eb..ca65ee44de5 100644
--- a/tests/regression_tests/volume_calc/inputs_true_mg.dat
+++ b/tests/regression_tests/volume_calc/inputs_true_mg.dat
@@ -17,7 +17,7 @@
-
+
@@ -33,6 +33,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>material</domain_type>
@@ -40,6 +41,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>universe</domain_type>
@@ -47,6 +49,7 @@
     <samples>100000</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>cell</domain_type>
@@ -55,6 +58,7 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="std_dev" threshold="0.1"/>
+    <estimator_type>ray</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>material</domain_type>
@@ -63,6 +67,7 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="rel_err" threshold="0.1"/>
+    <estimator_type>ray</estimator_type>
   </volume_calc>
   <volume_calc>
     <domain_type>cell</domain_type>
@@ -71,6 +76,16 @@
     <samples>100</samples>
     <lower_left>-1.0 -1.0 -6.0</lower_left>
     <upper_right>1.0 1.0 6.0</upper_right>
     <threshold type="variance" threshold="0.05"/>
+    <estimator_type>ray</estimator_type>
+  </volume_calc>
+  <volume_calc>
+    <domain_type>material</domain_type>
+    <domain_ids>1 2</domain_ids>
+    <samples>100</samples>
+    <lower_left>-1.0 -1.0 -6.0</lower_left>
+    <upper_right>1.0 1.0 6.0</upper_right>
+    <threshold type="rel_err" threshold="1e-10" max_iterations="9"/>
+    <estimator_type>hit</estimator_type>
   </volume_calc>
diff --git a/tests/regression_tests/volume_calc/results_true.dat b/tests/regression_tests/volume_calc/results_true.dat
index 9ab9646f91d..069181c3688 100644
--- a/tests/regression_tests/volume_calc/results_true.dat
+++ b/tests/regression_tests/volume_calc/results_true.dat
@@ -1,7 +1,8 @@
 Volume calculation 0
 Trigger Type: None
 Trigger threshold: None
-Iterations: 1
+Iterations limit: None
+Iterations completed: 1
 Domain 1: 31.36+/-0.07 cm^3
 Domain 2: 2.107+/-0.031 cm^3
 Domain 3: 2.164+/-0.031 cm^3
@@ -17,7 +18,8 @@ Domain 3: 2.164+/-0.031 cm^3
 Volume calculation 1
 Trigger Type: None
 Trigger threshold: None
-Iterations: 1
+Iterations limit: None
+Iterations completed: 1
 Domain 1: 4.27+/-0.04 cm^3
 Domain 2: 31.36+/-0.07 cm^3
   Material Nuclide         Atoms
@@ -29,7 +31,8 @@ Domain 2: 31.36+/-0.07 cm^3
diff --git a/tests/regression_tests/volume_calc/results_true.dat b/tests/regression_tests/volume_calc/results_true.dat
index 9ab9646f91d..069181c3688 100644
--- a/tests/regression_tests/volume_calc/results_true.dat
+++ b/tests/regression_tests/volume_calc/results_true.dat
@@ -1,7 +1,8 @@
 Volume calculation 0
 Trigger Type: None
 Trigger threshold: None
-Iterations: 1
+Iteration limit: None
+Iterations completed: 1
 Domain 1: 31.36+/-0.07 cm^3
 Domain 2: 2.107+/-0.031 cm^3
 Domain 3: 2.164+/-0.031 cm^3
@@ -17,7 +18,8 @@ Domain 3: 2.164+/-0.031 cm^3
 Volume calculation 1
 Trigger Type: None
 Trigger threshold: None
-Iterations: 1
+Iteration limit: None
+Iterations completed: 1
 Domain 1: 4.27+/-0.04 cm^3
 Domain 2: 31.36+/-0.07 cm^3
  Material Nuclide Atoms
@@ -29,7 +31,8 @@ Domain 2: 31.36+/-0.07 cm^3
 Volume calculation 2
 Trigger Type: None
 Trigger threshold: None
-Iterations: 1
+Iteration limit: None
+Iterations completed: 1
 Domain 0: 35.63+/-0.07 cm^3
  Universe Nuclide Atoms
 0 0 H1 (2.856+/-0.029)e+23
@@ -40,23 +43,55 @@ Domain 0: 35.63+/-0.07 cm^3
 Volume calculation 3
 Trigger Type: std_dev
 Trigger threshold: 0.1
-Iterations: 523
-Domain 1: 31.31+/-0.10 cm^3
-Domain 2: 2.10+/-0.04 cm^3
-Domain 3: 2.19+/-0.04 cm^3
+Iteration limit: None
+Iterations completed: 254
+Domain 1: 31.34+/-0.10 cm^3
+Domain 2: 2.04+/-0.04 cm^3
+Domain 3: 2.08+/-0.04 cm^3
  Cell Nuclide Atoms
-0 1 U235 (3.464+/-0.011)e+23
-1 1 Mo99 (3.464+/-0.011)e+22
-2 2 H1 (1.401+/-0.029)e+23
-3 2 O16 (7.01+/-0.14)e+22
-4 2 B10 (7.01+/-0.14)e+18
-5 3 H1 (1.464+/-0.029)e+23
-6 3 O16 (7.32+/-0.15)e+22
-7 3 B10 (7.32+/-0.15)e+18
+0 1 U235 (3.468+/-0.011)e+23
+1 1 Mo99 (3.468+/-0.011)e+22
+2 2 H1 (1.362+/-0.029)e+23
+3 2 O16 (6.81+/-0.15)e+22
+4 2 B10 (6.81+/-0.15)e+18
+5 3 H1 (1.392+/-0.029)e+23
+6 3 O16 (6.96+/-0.15)e+22
+7 3 B10 (6.96+/-0.15)e+18
 Volume calculation 4
 Trigger Type: rel_err
 Trigger threshold: 0.1
-Iterations: 9
+Iteration limit: None
+Iterations completed: 6
+Domain 1: 4.0+/-0.4 cm^3
+Domain 2: 31.4+/-0.6 cm^3
+ Material Nuclide Atoms
+0 1 H1 (2.71+/-0.25)e+23
+1 1 O16 (1.35+/-0.13)e+23
+2 1 B10 (1.35+/-0.13)e+19
+3 2 U235 (3.48+/-0.07)e+23
+4 2 Mo99 (3.48+/-0.07)e+22
+Volume calculation 5
+Trigger Type: variance
+Trigger threshold: 0.05
+Iteration limit: None
+Iterations completed: 51
+Domain 1: 31.27+/-0.22 cm^3
+Domain 2: 1.88+/-0.09 cm^3
+Domain 3: 2.19+/-0.10 cm^3
+ Cell Nuclide Atoms
+0 1 U235 (3.460+/-0.025)e+23
+1 1 Mo99 (3.460+/-0.025)e+22
+2 2 H1 (1.26+/-0.06)e+23
+3 2 O16 (6.30+/-0.31)e+22
+4 2 B10 (6.30+/-0.31)e+18
+5 3 H1 (1.46+/-0.07)e+23
+6 3 O16 (7.32+/-0.34)e+22
+7 3 B10 (7.32+/-0.34)e+18
+Volume calculation 6
+Trigger Type: rel_err
+Trigger threshold: 1e-10
+Iteration limit: 9
+Iterations completed: 9
 Domain 1: 5.3+/-0.5 cm^3
 Domain 2: 29.9+/-0.8 cm^3
  Material Nuclide Atoms
@@ -65,19 +100,3 @@ Domain 2: 29.9+/-0.8 cm^3
 2 1 B10 (1.77+/-0.17)e+19
 3 2 U235 (3.31+/-0.09)e+23
 4 2 Mo99 (3.31+/-0.09)e+22
-Volume calculation 5
-Trigger Type: variance
-Trigger threshold: 0.05
-Iterations: 105
-Domain 1: 31.16+/-0.22 cm^3
-Domain 2: 1.91+/-0.09 cm^3
-Domain 3: 2.32+/-0.10 cm^3
- Cell Nuclide Atoms
-0 1 U235 (3.448+/-0.025)e+23
-1 1 Mo99 (3.448+/-0.025)e+22
-2 2 H1 (1.28+/-0.06)e+23
-3 2 O16 (6.39+/-0.31)e+22
-4 2 B10 (6.39+/-0.31)e+18
-5 3 H1 (1.55+/-0.07)e+23
-6 3 O16 (7.76+/-0.34)e+22
-7 3 B10 (7.76+/-0.34)e+18
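
The expected results above encode the stopping rule that the updated test
asserts below: a calculation ends when its trigger condition is met or when
the iteration limit is reached, whichever comes first. A minimal sketch of
the per-domain trigger check (a hypothetical helper mirroring the test's
assertions, not an OpenMC API)::

    def trigger_satisfied(mean, std_dev, trigger_type, threshold):
        """True if a volume result meets its trigger condition."""
        if trigger_type == 'std_dev':
            return std_dev <= threshold
        if trigger_type == 'rel_err':
            return std_dev / mean <= threshold
        if trigger_type == 'variance':
            return std_dev**2 <= threshold
        raise ValueError(f'unknown trigger type: {trigger_type}')

    # Calculation 6 above: a 1e-10 relative-error trigger cannot be met in
    # 9 iterations, so the run stops at the iteration limit instead.
    assert not trigger_satisfied(29.9, 0.8, 'rel_err', 1e-10)
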
diff --git a/tests/regression_tests/volume_calc/test.py b/tests/regression_tests/volume_calc/test.py
index c94d16c7975..5922ea28bd2 100644
--- a/tests/regression_tests/volume_calc/test.py
+++ b/tests/regression_tests/volume_calc/test.py
@@ -16,7 +16,9 @@ def __init__(self, is_ce, *args, **kwargs):
         self.exp_std_dev = 1e-01
         self.exp_rel_err = 1e-01
+        self.tiny_rel_err = 1e-10
         self.exp_variance = 5e-02
+        self.max_iterations = 9
         self.is_ce = is_ce
         if not is_ce:
             self.inputs_true = 'inputs_true_mg.dat'
@@ -49,7 +51,7 @@ def __init__(self, is_ce, *args, **kwargs):

         # Define geometry
         inside_cyl = openmc.Cell(1, fill=fuel, region=-cyl & -top_plane & +bottom_plane)
         top_hemisphere = openmc.Cell(2, fill=water, region=-top_sphere & +top_plane)
-        bottom_hemisphere = openmc.Cell(3, fill=water, region=-bottom_sphere & -top_plane)
+        bottom_hemisphere = openmc.Cell(3, fill=water, region=-bottom_sphere & -bottom_plane)
         root = openmc.Universe(0, cells=(inside_cyl, top_hemisphere, bottom_hemisphere))
         self._model.geometry = openmc.Geometry(root)
@@ -60,9 +62,12 @@ def __init__(self, is_ce, *args, **kwargs):
             openmc.VolumeCalculation(list(root.cells.values()), 100000),
             openmc.VolumeCalculation([water, fuel], 100000, ll, ur),
             openmc.VolumeCalculation([root], 100000, ll, ur),
-            openmc.VolumeCalculation(list(root.cells.values()), 100),
-            openmc.VolumeCalculation([water, fuel], 100, ll, ur),
-            openmc.VolumeCalculation(list(root.cells.values()), 100)
+            openmc.VolumeCalculation(list(root.cells.values()), 100,
+                                     estimator_type='ray'),
+            openmc.VolumeCalculation([water, fuel], 100, ll, ur, 'ray'),
+            openmc.VolumeCalculation(list(root.cells.values()), 100,
+                                     estimator_type='ray'),
+            openmc.VolumeCalculation([water, fuel], 100, ll, ur)
         ]

         vol_calcs[3].set_trigger(self.exp_std_dev, 'std_dev')
@@ -71,6 +76,9 @@ def __init__(self, is_ce, *args, **kwargs):

         vol_calcs[5].set_trigger(self.exp_variance, 'variance')

+        vol_calcs[6].set_trigger(self.tiny_rel_err, 'rel_err',
+                                 self.max_iterations)
+
         # Define settings
         settings = openmc.Settings()
         settings.run_mode = 'volume'
@@ -130,30 +138,45 @@ def _get_results(self):
             outstr += 'Volume calculation {}\n'.format(i)
             outstr += 'Trigger Type: {}\n'.format(volume_calc.trigger_type)
             outstr += 'Trigger threshold: {}\n'.format(volume_calc.threshold)
-            outstr += 'Iterations: {}\n'.format(volume_calc.iterations)
+            outstr += 'Iteration limit: {}\n'.format(volume_calc.max_iterations)
+            outstr += 'Iterations completed: {}\n'.format(volume_calc.iterations)

+            # make sure the volume calculation results contain the true
+            # trigger values and iteration limit
             if i == 3:
                 assert volume_calc.trigger_type == 'std_dev'
                 assert volume_calc.threshold == self.exp_std_dev
+                assert volume_calc.max_iterations is None
             elif i == 4:
                 assert volume_calc.trigger_type == 'rel_err'
                 assert volume_calc.threshold == self.exp_rel_err
+                assert volume_calc.max_iterations is None
             elif i == 5:
                 assert volume_calc.trigger_type == 'variance'
                 assert volume_calc.threshold == self.exp_variance
+                assert volume_calc.max_iterations is None
+            elif i == 6:
+                assert volume_calc.trigger_type == 'rel_err'
+                assert volume_calc.threshold == self.tiny_rel_err
+                assert volume_calc.max_iterations == self.max_iterations
             else:
                 assert volume_calc.trigger_type is None
                 assert volume_calc.threshold is None
+                assert volume_calc.max_iterations is None
                 assert volume_calc.iterations == 1

-            # if a trigger is applied, make sure the calculation satisfies the trigger
+            # if a trigger is applied, make sure the calculation satisfies
+            # the trigger or stopped at the iteration limit
             for vol in volume_calc.volumes.values():
                 if volume_calc.trigger_type == 'std_dev':
-                    assert vol.std_dev <= self.exp_std_dev
+                    assert (vol.std_dev <= volume_calc.threshold or
+                            volume_calc.iterations == volume_calc.max_iterations)
                 if volume_calc.trigger_type == 'rel_err':
-                    assert vol.std_dev/vol.nominal_value <= self.exp_rel_err
+                    assert (vol.std_dev/vol.nominal_value <= volume_calc.threshold or
+                            volume_calc.iterations == volume_calc.max_iterations)
                 if volume_calc.trigger_type == 'variance':
-                    assert vol.std_dev * vol.std_dev <= self.exp_variance
+                    assert (vol.std_dev * vol.std_dev <= volume_calc.threshold or
+                            volume_calc.iterations == volume_calc.max_iterations)

             # Write cell volumes and total # of atoms for each nuclide
             for uid, volume in sorted(volume_calc.volumes.items()):
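
After a volume run (``openmc --volume`` or ``openmc.calculate_volumes()``),
each calculation writes a ``volume_N.h5`` file that the existing Python
reader can load back; the trigger settings and iteration count shown in the
results file above are exposed as attributes. A brief post-processing
sketch::

    import openmc

    vol_calc = openmc.VolumeCalculation.from_hdf5('volume_1.h5')
    print('trigger:', vol_calc.trigger_type, vol_calc.threshold)
    print('iterations completed:', vol_calc.iterations)
    for domain_id, volume in sorted(vol_calc.volumes.items()):
        print(domain_id, volume, 'cm^3')  # ufloat: nominal +/- std. dev.
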