diff --git a/docs/user/Notebooks/1-Meshes.ipynb b/docs/user/Notebooks/1-Meshes.ipynb index 930d65ea..df91378c 100644 --- a/docs/user/Notebooks/1-Meshes.ipynb +++ b/docs/user/Notebooks/1-Meshes.ipynb @@ -14,15 +14,13 @@ }, "source": [ "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", + " \n", + "![](media/SampleSphericalMesh.png)\n", + "\n", "
\n", "\n", "\n", - "# Notebook 1: Meshes\n", + "## Notebook 1: Meshes\n", "\n", "Introducing meshes: how to build them, interrogate them and visualise them.\n", "\n", @@ -36,7 +34,6 @@ "\n", "Mesh adaptivity is a work-in-progress.\n", "\n", - "\n", "\n" ] }, @@ -64,7 +61,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 2, "id": "5a4437ff-63ae-4ffa-9f2b-8ec1b8b4baeb", "metadata": { "editable": true, @@ -74,7 +71,15 @@ }, "tags": [] }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PostHog telemetry failed: HTTPSConnectionPool(host='eu.i.posthog.com', port=443): Max retries exceeded with url: /capture/ (Caused by NameResolutionError(\": Failed to resolve 'eu.i.posthog.com' ([Errno 8] nodename nor servname provided, or not known)\"))\n" + ] + } + ], "source": [ "#| output: false # Suppress warnings in html version\n", "\n", @@ -100,7 +105,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 22, "id": "1c928d60-24a9-4415-ab22-41834695c70a", "metadata": { "editable": true, @@ -109,15 +114,26 @@ }, "tags": [] }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Constructing UW mesh from gmsh .meshes/uw_cubed_spherical_shell_ro1.0_ri0.547_elts8_plexTrue.msh\n", + "Mesh refinement levels: 0\n", + "Mesh coarsening levels: None\n", + "Populating mesh coordinates CoordinateSystemType.SPHERICAL\n" + ] + } + ], "source": [ "mesh = uw.meshing.uw.meshing.CubedSphere(\n", " radiusOuter=1.0,\n", " radiusInner=0.547,\n", " numElements=8,\n", " refinement=0,\n", - " simplex=False,\n", - " verbose=False,\n", + " simplex=True,\n", + " verbose=True,\n", ")" ] }, @@ -141,7 +157,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 23, "id": "b9cfac97-5c19-46b1-aebd-0fb77678b9ed", "metadata": {}, "outputs": [ @@ -152,12 +168,12 @@ " [-0.57735027, 0.57735027, -0.57735027],\n", " [-0.57735027, -0.57735027, -0.57735027],\n", " ...,\n", - " [ 0.58691901, -0.41511058, -0.41511058],\n", - " [ 0.6269543 , -0.44342636, -0.44342636],\n", - " [ 0.66698958, -0.47174214, -0.47174214]])" + " [ 0.61340251, 0.4866531 , 0.41538064],\n", + " [ 0.56556024, 0.42209717, 0.44274422],\n", + " [ 0.51755571, 0.41031492, 0.44421012]])" ] }, - "execution_count": 4, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -166,6 +182,14 @@ "mesh.data" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7c5dd7e-58d2-45d0-bfc6-eaded6da20f0", + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "id": "1ed56724-adc5-49c6-a33b-4552b619cb07", @@ -208,7 +232,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 26, "id": "1cb8378c-cdb8-4e75-bd12-e85f12169ef7", "metadata": { "editable": true, @@ -226,13 +250,14 @@ "\n", "pvmesh = vis.mesh_to_pv_mesh(mesh)\n", "pvmesh.point_data[\"z\"] = vis.scalar_fn_to_pv_points(pvmesh, mesh.CoordinateSystem.X[2])\n", + "pvmesh1 = pvmesh.copy()\n", "\n", "if mesh.dim==3:\n", " pvmesh_c = pvmesh.clip( normal='z', crinkle=True, inplace=False, origin=(0.0,0.0,0.01))\n", "\n", "pl = pv.Plotter(window_size=(750, 750))\n", "pl.add_mesh(pvmesh_c, show_edges=True, show_scalar_bar=False, opacity=1.0)\n", - "pl.add_mesh(pvmesh, show_edges=True, show_scalar_bar=False, opacity=0.3)\n", + "pl.add_mesh(pvmesh1, show_edges=True, show_scalar_bar=False, opacity=0.3)\n", "\n", "\n", "# Save and show the mesh\n", @@ -241,7 +266,7 @@ }, { "cell_type": "code", - 
"execution_count": 6, + "execution_count": 27, "id": "fd6cbed0-3159-4c6c-a482-6a9636158854", "metadata": { "editable": true, @@ -266,10 +291,10 @@ " " ], "text/plain": [ - "" + "" ] }, - "execution_count": 6, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -320,10 +345,10 @@ { "data": { "text/latex": [ - "$\\displaystyle \\left[\\begin{matrix}{ r \\hspace{ 0.0pt } } & { \\theta \\hspace{ 0.01pt } } & { \\phi \\hspace{ 0.02pt } }\\end{matrix}\\right]$" + "$\\displaystyle \\left[\\begin{matrix}r & \\theta & \\phi\\end{matrix}\\right]$" ], "text/plain": [ - "Matrix([[{ r \\hspace{ 0.0pt } }, { \\theta \\hspace{ 0.01pt } }, { \\phi \\hspace{ 0.02pt } }]])" + "Matrix([[r, \\theta, \\phi]])" ] }, "metadata": {}, @@ -382,33 +407,44 @@ "\n", "\n", "Mesh # 0: .meshes/uw_cubed_spherical_shell_ro1.0_ri0.547_elts8_plexFalse.msh\n", + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f18b77ba1d8d44ae96a3832ea3ffa5a3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Widget(value='\n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "#| fig-cap: \"Interactive Image: Convection model output\"\n", "from IPython.display import IFrame\n", + "\n", "IFrame(src=\"html5/annulus_convection_plot.html\", width=500, height=400)" ] }, @@ -468,8 +646,9 @@ "Based on our previous notebook, can you see how to calculate and (if necessary) remove rigid-body the rotation \n", "null-space from the solution ? \n", "\n", - "The use of a coarse-level singular-value decomposition for the velocity solver should help, in this case, but it's wise to \n", - "check anyway.\n", + "The use of a coarse-level singular-value decomposition for the velocity solver should help, in this case, but sometimes\n", + "you can see that there is a rigid body rotation (look at the streamlines). It's wise to check and quantify the presence of \n", + "the null space.\n", "\n", "```python\n", " stokes.petsc_options[\"fieldsplit_velocity_mg_coarse_pc_type\"] = \"svd\"\n", @@ -505,9 +684,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python (Pixi)", "language": "python", - "name": "python3" + "name": "pixi-kernel-python3" }, "language_info": { "codemirror_mode": { @@ -519,7 +698,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.12.11" } }, "nbformat": 4, diff --git a/docs/user/Notebooks/html5/annulus_convection_plot.html b/docs/user/Notebooks/html5/annulus_convection_plot.html index af9d2860..81580887 100644 --- a/docs/user/Notebooks/html5/annulus_convection_plot.html +++ b/docs/user/Notebooks/html5/annulus_convection_plot.html @@ -7,11 +7,11 @@
diff --git a/docs/user/Notebooks/html5/echidna_plot.html b/docs/user/Notebooks/html5/echidna_plot.html index 27ca108c..9df60b7a 100644 --- a/docs/user/Notebooks/html5/echidna_plot.html +++ b/docs/user/Notebooks/html5/echidna_plot.html @@ -7,11 +7,11 @@
diff --git a/docs/user/Notebooks/html5/sine_squared.html b/docs/user/Notebooks/html5/sine_squared.html index e2df0865..636ddeaa 100644 --- a/docs/user/Notebooks/html5/sine_squared.html +++ b/docs/user/Notebooks/html5/sine_squared.html @@ -7,11 +7,11 @@
diff --git a/docs/user/Notebooks/html5/spherical_mesh_plot.html b/docs/user/Notebooks/html5/spherical_mesh_plot.html index 163d89e2..34d23ff6 100644 --- a/docs/user/Notebooks/html5/spherical_mesh_plot.html +++ b/docs/user/Notebooks/html5/spherical_mesh_plot.html @@ -7,11 +7,11 @@
diff --git a/docs/user/Notebooks/html5/stokes_annulus_plot.html b/docs/user/Notebooks/html5/stokes_annulus_plot.html index f8b38f84..e8457c8c 100644 --- a/docs/user/Notebooks/html5/stokes_annulus_plot.html +++ b/docs/user/Notebooks/html5/stokes_annulus_plot.html @@ -7,11 +7,11 @@
diff --git a/docs/user/Notebooks/html5/temperature_plot.html b/docs/user/Notebooks/html5/temperature_plot.html index b7f4bdc9..5ad8544e 100644 --- a/docs/user/Notebooks/html5/temperature_plot.html +++ b/docs/user/Notebooks/html5/temperature_plot.html @@ -7,11 +7,11 @@
diff --git a/setup.py b/setup.py index 6066b7c3..4816dc9d 100644 --- a/setup.py +++ b/setup.py @@ -10,6 +10,7 @@ import numpy import petsc4py + def configure(): INCLUDE_DIRS = [] @@ -22,6 +23,7 @@ def configure(): # try get PETSC_DIR from petsc pip installation try: import petsc + PETSC_DIR = petsc.get_petsc_dir() except: pass @@ -43,8 +45,15 @@ def configure(): if os.environ.get("CONDA_PREFIX") and not os.environ.get("PETSC_DIR"): import sys + py_version = f"{sys.version_info.major}.{sys.version_info.minor}" - PETSC_DIR = os.path.join(os.environ["CONDA_PREFIX"],"lib","python"+py_version, "site-packages", "petsc") # symlink to latest python + PETSC_DIR = os.path.join( + os.environ["CONDA_PREFIX"], + "lib", + "python" + py_version, + "site-packages", + "petsc", + ) # symlink to latest python PETSC_ARCH = os.environ.get("PETSC_ARCH", "") else: PETSC_DIR = os.environ["PETSC_DIR"] @@ -54,7 +63,6 @@ def configure(): print(f"PETSC_DIR: {PETSC_DIR}") print(f"PETSC_ARCH: {PETSC_ARCH}") - from os.path import join, isdir if PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH)): @@ -63,13 +71,15 @@ def configure(): join(PETSC_DIR, "include"), ] LIBRARY_DIRS += [join(PETSC_DIR, PETSC_ARCH, "lib")] - petscvars = join(PETSC_DIR,PETSC_ARCH,"lib","petsc","conf","petscvariables") + petscvars = join( + PETSC_DIR, PETSC_ARCH, "lib", "petsc", "conf", "petscvariables" + ) else: if PETSC_ARCH: pass # XXX should warn ... INCLUDE_DIRS += [join(PETSC_DIR, "include")] LIBRARY_DIRS += [join(PETSC_DIR, "lib")] - petscvars = join(PETSC_DIR,"lib","petsc","conf","petscvariables") + petscvars = join(PETSC_DIR, "lib", "petsc", "conf", "petscvariables") LIBRARIES += ["petsc"] @@ -77,12 +87,12 @@ def configure(): # This ought include mpi's details, ie mpicc --showme, # needed to compile UW cython extensions compiler = "" - with open(petscvars,"r") as f: + with open(petscvars, "r") as f: for line in f: line = line.strip() if line.startswith("CC ="): - compiler = line.split("=",1)[1].strip() - #print(f"***\n The c compiler is: {compiler}\n*****") + compiler = line.split("=", 1)[1].strip() + # print(f"***\n The c compiler is: {compiler}\n*****") os.environ["CC"] = compiler # PETSc for Python @@ -107,6 +117,15 @@ def configure(): extra_compile_args = ["-O3", "-g"] # extra_compile_args = ['-O0', '-g'] extensions = [ + Extension( + "underworld3.ckdtree", + sources=[ + "src/underworld3/ckdtree.pyx", + ], + extra_compile_args=extra_compile_args + ["-std=c++11"], + language="c++", + **conf, + ), Extension( "underworld3.cython.petsc_discretisation", sources=[ @@ -146,7 +165,7 @@ def configure(): "src/underworld3/function/petsc_tools.c", ], extra_compile_args=extra_compile_args, - define_macros=[('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')], + define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")], **conf, ), Extension( @@ -160,6 +179,7 @@ def configure(): ), ] + # util function to get version information from file with __version__= def get_version(filename): try: @@ -170,21 +190,23 @@ def get_version(filename): version = line.split('"')[1].strip().strip('"').strip("'") return version except FileNotFoundError: - print( f"Cannot get version information from {filename}" ) + print(f"Cannot get version information from {filename}") except: raise + # Create uwid if it doesn't exist -idfile = './src/underworld3/_uwid.py' +idfile = "./src/underworld3/_uwid.py" if not os.path.isfile(idfile): import uuid + with open(idfile, "w+") as f: - f.write("uwid = \'" + str(uuid.uuid4()) + "\'") - + f.write("uwid = '" + str(uuid.uuid4()) + "'") 
+ setup( name="underworld3", packages=find_packages(), - version=get_version('./src/underworld3/_version.py'), + version=get_version("./src/underworld3/_version.py"), package_data={"underworld3": ["*.pxd", "*.h", "function/*.h", "cython/*.pxd"]}, ext_modules=cythonize( extensions, diff --git a/src/underworld3/__init__.py b/src/underworld3/__init__.py index 15285c5f..8e8be3dc 100644 --- a/src/underworld3/__init__.py +++ b/src/underworld3/__init__.py @@ -127,6 +127,7 @@ def view(): import underworld3.maths import underworld3.utilities import underworld3.kdtree +import underworld3.ckdtree import underworld3.cython import underworld3.scaling import underworld3.visualisation @@ -137,45 +138,54 @@ def view(): from ._uwid import uwid as _id except: import uuid as _uuid + _id = str(_uuid.uuid4()) ########################################### ####### For User telemetry metrics ######## -# setup metrics function *only* if we are rank==0. +# setup metrics function *only* if we are rank==0. # Metric can be disables by setting UW_NO_USAGE_METRICS env var. # TODO: confirm not other condition like tests? -if (underworld3.mpi.rank == 0): +if underworld3.mpi.rank == 0: + def _sendData(): import os + # disable collection of data if requested if "UW_NO_USAGE_METRICS" not in os.environ: # get platform info import platform - sysinfo = platform.system() + + sysinfo = platform.system() sysinfo += "__" + platform.machine() # check if docker - if (os.path.isfile("/.dockerinit")): + if os.path.isfile("/.dockerinit"): sysinfo += "__docker" - event_dict = { "properties": { - "version" : underworld3.__version__, - "platform" : sysinfo, - "run_size" : underworld3.mpi.size, - "distinct_id" : _id, - } - } + event_dict = { + "properties": { + "version": underworld3.__version__, + "platform": sysinfo, + "run_size": underworld3.mpi.size, + "distinct_id": _id, + } + } # send info async import threading - thread = threading.Thread( target=underworld3.utilities._utils.postHog, args=("import_uw3", event_dict) ) + + thread = threading.Thread( + target=underworld3.utilities._utils.postHog, + args=("import_uw3", event_dict), + ) thread.daemon = True thread.start() try: _sendData() - except: # continue quietly if something above failed + except: # continue quietly if something above failed pass ####### END User telemetry metrics ######## diff --git a/src/underworld3/ckdtree.pyx b/src/underworld3/ckdtree.pyx new file mode 100644 index 00000000..39e12c69 --- /dev/null +++ b/src/underworld3/ckdtree.pyx @@ -0,0 +1,354 @@ +from types import WrapperDescriptorType +import underworld3 +import underworld3 as uw +import underworld3.timing as timing +import numpy +import numpy as np + +from libcpp cimport bool + + +cdef extern from "kdtree_interface.hpp" nogil: + cdef cppclass KDTree_Interface: + KDTree_Interface() + KDTree_Interface( const double* points, int numpoints, int dim ) + void build_index() + void find_closest_point( size_t num_coords, const double* coords, long unsigned int* indices, double* out_dist_sqr, bool* found ) + size_t knnSearch(const double* query_point, const size_t num_closest, long unsigned int* indices, double* out_dist_sqr ) + +cdef class cKDTree: + """ + KD-Tree indexes are data structures and algorithms for the efficient + determination of nearest neighbours. + + This class generates a kd-tree index for the provided points, and provides + the necessary methods for finding which points are closest to a given query + location. + + This class utilises `nanoflann` for kd-tree functionality. 
+
+    Parameters
+    ----------
+    points:
+        The points for which the kd-tree index will be built. This
+        should be a 2-dimensional array of size (n_points,dim).
+
+    Example
+    -------
+    >>> import numpy as np
+    >>> import underworld3 as uw
+
+    Generate a random set of points
+    >>> pts = np.random.random( size=(100,2) )
+
+    Build the index on the points
+    >>> index = uw.ckdtree.cKDTree(pts)
+    >>> index.build_index()
+
+    Search the index for a coordinate
+    >>> coord = np.zeros((1,2))
+    >>> coord[0] = (0.5,0.5)
+    >>> indices, dist_sqr, found = index.find_closest_point(coord)
+
+    Confirm that a point has been found
+    >>> found[0]
+    True
+
+    """
+    cdef KDTree_Interface* index
+    cdef const double[:,::1] points
+
+    def __cinit__( self,
+                   const double[:,::1] points not None: numpy.ndarray ) :
+
+        if points.shape[1] not in (2,3):
+            raise RuntimeError(f"Provided points array dimensionality must be 2 or 3, not {points.shape[1]}.")
+        self.points = points
+        self.index = new KDTree_Interface( &points[0][0], points.shape[0], points.shape[1])
+        super().__init__()
+
+    def __dealloc__(self):
+        del self.index
+
+    @timing.routine_timer_decorator
+    def build_index(self):
+        """
+        Build the kd-tree index.
+        """
+        self.index.build_index()
+
+    def kdtree_points(self):
+        """
+        Returns a copy of the points used to define the kd-tree
+        """
+
+        return np.array(self.points)
+
+
+    @timing.routine_timer_decorator
+    def find_closest_point(self,
+            const double[:,::1] coords not None: numpy.ndarray):
+        """
+        Find the points closest to the provided set of coordinates.
+
+        Parameters
+        ----------
+        coords:
+            An array of coordinates for which the kd-tree index will be searched for nearest
+            neighbours. This should be a 2-dimensional array of size (n_coords,dim).
+
+        Returns
+        -------
+        indices:
+            An integer array of indices into the `points` array (passed into the constructor) corresponding to
+            the nearest neighbour for the search coordinates. It will be of size (n_coords).
+        dist_sqr:
+            A float array of squared distances between the provided coords and the nearest neighbouring
+            points. It will be of size (n_coords).
+        found:
+            A bool array of flags which signals whether a nearest neighbour has been found for a given
+            coordinate. It will be of size (n_coords).
+
+        """
+        if coords.shape[1] != self.points.shape[1]:
+            raise RuntimeError(f"Provided coords array dimensionality ({coords.shape[1]}) is different to points dimensionality ({self.points.shape[1]}).")
+
+        count = coords.shape[0]
+        indices  = np.empty(count, dtype=np.uint64,  order='C')
+        dist_sqr = np.empty(count, dtype=np.float64, order='C')
+        found    = np.empty(count, dtype=np.bool_,   order='C')
+
+        cdef long unsigned int[::1] c_indices = indices
+        cdef double[::1] c_dist_sqr = dist_sqr
+        cdef bool[::1] c_found = found
+        self.index.find_closest_point(count,
+                        < const double *> &coords[0][0],
+                                           &c_indices[0],
+                        < double*>         &c_dist_sqr[0],
+                        < bool*>           &c_found[0] )
+        return indices, dist_sqr, found
+
+    @timing.routine_timer_decorator
+    def find_closest_n_points(self,
+                const int nCount : numpy.int64,
+                const double[: ,::1] coords not None: numpy.ndarray):
+        """
+        Find the n points closest to the provided coordinates.
+
+        Parameters
+        ----------
+        nCount:
+            The number of nearest neighbour points to find for each `coords`.
+
+        coords:
+            Coordinates of the points for which the kd-tree index will be searched for nearest
+            neighbours. This should be a 2-dimensional array of size (n_coords,dim).
+
+        Returns
+        -------
+        indices:
+            An integer array of indices into the `points` array (passed into the constructor) corresponding to
+            the nearest neighbours for the search coordinates. It will be of size (n_coords, nCount).
+        dist_sqr:
+            A float array of squared distances between the provided coords and the nearest neighbouring
+            points. It will be of size (n_coords, nCount).
+
+        """
+
+        if coords.shape[1] != self.points.shape[1]:
+            raise RuntimeError(f"Provided coords array dimensionality ({coords.shape[1]}) is different to points dimensionality ({self.points.shape[1]}).")
+        nInput = coords.shape[0]
+
+        # allocate numpy arrays -
+
+        n_indices = np.empty((coords.shape[0], nCount), dtype=np.uint64, order='C')
+        n_dist_sqr = np.empty((coords.shape[0], nCount), dtype=np.float64, order='C')
+
+        indices = np.empty(nCount, dtype=np.uint64, order='C')
+        dist_sqr = np.empty(nCount, dtype=np.float64, order='C')
+
+        # allocate memoryviews in C contiguous layout
+        cdef long unsigned int[::1] c_indices = indices
+        cdef double[::1] c_dist_sqr = dist_sqr
+
+        # Build the array one point at a time
+
+        for p in range(coords.shape[0]):
+            self.index.knnSearch( &coords[p][0],
+                                  nCount,
+                                  &c_indices[0],
+                       < double*> &c_dist_sqr[0])
+
+            n_indices[p,:] = indices[:]
+            n_dist_sqr[p,:] = dist_sqr[:]
+
+        # return numpy data
+        return n_indices, n_dist_sqr
+
+    @timing.routine_timer_decorator
+    def query(self,
+              const double[: ,::1] coords not None: numpy.ndarray,
+              const int k : numpy.int64,
+              bool sqr_dists=True,
+              ):
+        """
+        Find the n points closest to the provided coordinates.
+        Returns (distances, indices); the distances are squared unless
+        `sqr_dists` is set to False.
+        """
+
+        i, d = self.find_closest_n_points(k, coords)
+
+        # find_closest_n_points already returns squared distances, so only
+        # take the square root when actual distances are requested
+        if sqr_dists:
+            return d, i
+        else:
+            return numpy.sqrt(d), i
+
+
+## A general point-to-point rbf interpolator here
+## NOTE this is not using cython optimisation for numpy
+
+    # For backward compatibility, default for the rbf_interpolator function
+    # is the _from_kdtree version
+
+    def rbf_interpolator_local(self,
+                               coords,
+                               data,
+                               nnn = 4,
+                               verbose = False,
+                               ):
+
+        return self.rbf_interpolator_local_from_kdtree(
+            coords, data, nnn, verbose,
+        )
+
+
+    def rbf_interpolator_local_from_kdtree(self,
+                                           coords,
+                                           data,
+                                           nnn = 4,
+                                           verbose = False,
+                                           ):
+
+        '''
+        An inverse (squared) distance weighted mapping of a numpy array from the
+        set of coordinates defined by the kd-tree to the set of input points specified.
+        This assumes all points are local to the same processor.
+        If that is not the case, it is best to use a particle swarm
+        to manage the distributed data.
+        '''
+
+        if coords.shape[1] != self.points.shape[1]:
+            raise RuntimeError(f"Interpolation coordinates dimensionality ({coords.shape[1]}) is different to kD-tree dimensionality ({self.points.shape[1]}).")
+        nInput = coords.shape[0]
+
+        if data.shape[0] != self.points.shape[0]:
+            raise RuntimeError(f"Data does not match kD-tree size array ({data.shape[0]}) v ({self.points.shape[0]}).")
+
+        coords_contiguous = np.ascontiguousarray(coords)
+
+        closest_n, distance_n = self.find_closest_n_points(nnn, coords_contiguous)
+
+        num_local_points = coords.shape[0]
+        try:
+            data_size = data.shape[1]
+        except IndexError:
+            data_size = 1
+            data = data.reshape(-1,1)
+
+        Values = np.zeros((num_local_points, data_size))
+        Weights = np.zeros((num_local_points, 1))
+
+        if verbose and uw.mpi.rank == 0:
+            print("Mapping values ... 
start", flush=True) + + epsilon = 1.0e-9 + for j in range(nnn): + j_distance = epsilon + np.sqrt(distance_n[:, j]) + Weights[:, 0] += 1.0 / j_distance[:] + + for d in range(data_size): + for j in range(nnn): + j_distance = epsilon + np.sqrt(distance_n[:, j]) + j_nearest = closest_n[:, j] + Values[:, d] += data[j_nearest, d] / j_distance + + Values[...] /= Weights[:] + + if verbose and uw.mpi.rank == 0: + print("Mapping values ... done", flush=True) + + del coords_contiguous + del closest_n + del distance_n + del Weights + + return Values + + def rbf_interpolator_local_to_kdtree(self, + coords, + data, + nnn = 4, + verbose = False, + weights = None + ): + + ''' + An inverse (squared) distance weighted mapping of a numpy array to the + set of coordinates defined by the kd-tree from the set of input points specified. + This assumes all points are local to the same processor. + If that is not the case, it is sensible to use a particle swarm + to manage the distributed data. + ''' + + if coords.shape[1] != self.points.shape[1]: + raise RuntimeError(f"Interpolation coordinates dimensionality ({coords.shape[1]}) is different to kD-tree dimensionality ({self.points.shape[1]}).") + nInput = coords.shape[0] + + if data.shape[0] != coords.shape[0]: + raise RuntimeError(f"Data does not match coords size array ({data.shape[0]}) v ({coords.shape[0]}).") + + coords_contiguous = np.ascontiguousarray(coords) + + closest_n, distance_n = self.find_closest_n_points(nnn, coords_contiguous) + + num_local_points = self.points.shape[0] + try: + data_size = data.shape[1] + except IndexError: + data_size = 1 + data = data.reshape(-1,1) + + Values = np.zeros((num_local_points, data_size)) + Weights = np.zeros((num_local_points, 1)) + + if verbose and uw.mpi.rank == 0: + print(f"Mapping values ... start", flush=True) + + epsilon = 1.0e-9 + for j in range(nnn): + j_distance = epsilon + np.sqrt(distance_n[:, j]) + Weights[closest_n[:,j], 0] += 1.0 / j_distance[:] + + for d in range(data_size): + for j in range(nnn): + j_distance = epsilon + np.sqrt(distance_n[:, j]) + j_nearest = closest_n[:, j] + Values[j_nearest, d] += data[:, d] / j_distance + + # In this case, weights may be zero + Values[Weights!=0] /= Weights[Weights!=0] + + if verbose and uw.mpi.rank == 0: + print("Mapping values ... done", flush=True) + + if isinstance(weights, np.ndarray): + weights[...] = Weights[...] + + del coords_contiguous + del closest_n + del distance_n + del Weights + + return Values diff --git a/src/underworld3/cython/petsc_discretisation.pyx b/src/underworld3/cython/petsc_discretisation.pyx index f4560596..7091cbde 100644 --- a/src/underworld3/cython/petsc_discretisation.pyx +++ b/src/underworld3/cython/petsc_discretisation.pyx @@ -1,6 +1,6 @@ from typing import Optional, Tuple, Union from petsc4py import PETSc -import underworld3 as uw +import underworld3 as uw # from cython cimport view # comment this line to see what will happen import numpy as np @@ -15,10 +15,10 @@ include "petsc_extras.pxi" def petsc_fvm_get_min_radius(mesh) -> float: """ This method returns the minimum distance from any cell centroid to a face. - It wraps to the PETSc `DMPlexGetMinRadius` routine. + It wraps to the PETSc `DMPlexGetMinRadius` routine. """ - ## Note: The petsc4py version of DMPlexComputeGeometryFVM does not compute all cells and + ## Note: The petsc4py version of DMPlexComputeGeometryFVM does not compute all cells and ## does not obtain the minimum radius for the mesh. 
cdef Vec cellgeom = Vec() @@ -26,7 +26,7 @@ def petsc_fvm_get_min_radius(mesh) -> float: cdef DM dm = mesh.dm DMPlexComputeGeometryFVM(dm.dm,&cellgeom.vec,&facegeom.vec) - + min_radius = dm.getMinRadius() cellgeom.destroy() facegeom.destroy() @@ -36,10 +36,10 @@ def petsc_fvm_get_min_radius(mesh) -> float: def petsc_fvm_get_local_cell_sizes(mesh) -> np.array: """ This method returns the minimum distance from any cell centroid to a face. - It wraps to the PETSc `DMPlexGetMinRadius` routine. + It wraps to the PETSc `DMPlexGetMinRadius` routine. """ - ## Note: The petsc4py version of DMPlexComputeGeometryFVM does not compute all cells and + ## Note: The petsc4py version of DMPlexComputeGeometryFVM does not compute all cells and ## does not obtain the minimum radius for the mesh. cdef Vec cellgeom = Vec() @@ -73,20 +73,20 @@ def petsc_dm_create_submesh_from_label(incoming_dm, boundary_label_name, boundar cdef PetscBool markedFaces = marked_faces subdm = PETSc.DM() - + DMGetLabel(c_dm.dm, "Boundary", &dmlabel) # DMPlexCreateSubmesh(dm.dm, dmlabel, value, markedFaces, &subdm.dm) - return + return - -# This is not cython, does it need to be here or in discretisation.py ? + +# This is not cython, does it need to be here or in discretisation.py ? def petsc_dm_find_labeled_points_local(dm, label_name, sectionIndex=False, verbose=False): - '''Identify local points associated with "Label" - - dm -> expects a petscDM object + '''Identify local points associated with "Label" + + dm -> expects a petscDM object label_name -> "String Name for Label" sectionIndex -> False: leave points as indexed by the relevant section on the dm True: index into the local coordinate array @@ -103,7 +103,7 @@ def petsc_dm_find_labeled_points_local(dm, label_name, sectionIndex=False, verbo if uw.mpi.rank == 0: print(f"Label {label_name} is not present on the dm") return np.array([0]) - + pointIS = dm.getStratumIS("depth",0) edgeIS = dm.getStratumIS("depth",1) faceIS = dm.getStratumIS("depth",2) @@ -114,9 +114,9 @@ def petsc_dm_find_labeled_points_local(dm, label_name, sectionIndex=False, verbo _, iset_lab = label.convertToSection() - IndicesP = np.intersect1d(iset_lab.getIndices(), pointIS.getIndices()) + IndicesP = np.intersect1d(iset_lab.getIndices(), pointIS.getIndices()) IndicesE = np.intersect1d(iset_lab.getIndices(), edgeIS.getIndices()) - IndicesF = np.intersect1d(iset_lab.getIndices(), faceIS.getIndices()) + IndicesF = np.intersect1d(iset_lab.getIndices(), faceIS.getIndices()) # print(f"Label {label_name}") # print(f"P -> {len(IndicesP)}, E->{len(IndicesE)}, F->{len(IndicesF)},") @@ -147,11 +147,11 @@ def petsc_dm_find_labeled_points_local(dm, label_name, sectionIndex=False, verbo ## Todo ! -""" +""" def petsc_dm_get_periodicity(incoming_dm): dim = incoming_dm.getDimension() - + cdef PetscInt c_dim = dim cdef PetscReal c_maxCell[3] cdef PetscReal c_Lstart[3] @@ -174,19 +174,19 @@ def petsc_dm_get_periodicity(incoming_dm): Lx = c_L[0] Ly = c_L[1] Lz = c_L[2] - - + + print(f"Max x - {maxx}, y - {maxy}, z - {maxz}" ) print(f"Ls x - {Lstartx}, y - {Lstarty}, z - {Lstartz}" ) print(f"L x - {Lx}, y - {Ly}, z - {Lz}" ) - return + return """ def petsc_dm_set_periodicity(incoming_dm, maxCell, Lstart, L): - """ + """ Wrapper for PETSc DMSetPeriodicity: - - maxCell - Over distances greater than this, we can assume a point has crossed over to another sheet, when trying to localize cell coordinates. 
+ - maxCell - Over distances greater than this, we can assume a point has crossed over to another sheet, when trying to localize cell coordinates. Pass NULL to remove such information. - Lstart - If we assume the mesh is a torus, this is the start of each coordinate, or NULL for 0.0 - L - If we assume the mesh is a torus, this is the length of each coordinate, otherwise it is < 0.0 @@ -202,7 +202,7 @@ def petsc_dm_set_periodicity(incoming_dm, maxCell, Lstart, L): maxCell3[i] = maxCell[i] Lstart3[i] = Lstart[i] L3[i] = L[i] - + cdef DM c_dm = incoming_dm cdef PetscInt c_dim = dim cdef PetscReal c_maxCell[3] @@ -218,7 +218,7 @@ def petsc_dm_set_periodicity(incoming_dm, maxCell, Lstart, L): # incoming_dm.localizeCoordinates() - return + return def petsc_vec_concatenate( inputVecs ): @@ -234,7 +234,7 @@ def petsc_vec_concatenate( inputVecs ): cdef PetscVec cvecs[100] cdef Vec output_cVec = Vec() - for i from 0 <= i < nx: + for i from 0 <= i < nx: cvecs[i] = (inputVecs[i]).vec ierr = VecConcatenate(nx, cvecs, &output_cVec.vec, NULL) @@ -242,3 +242,8 @@ def petsc_vec_concatenate( inputVecs ): outputVec = output_cVec return outputVec + + +def petsc_get_swarm_coord_name( sdm ): + + return diff --git a/src/underworld3/discretisation.py b/src/underworld3/discretisation.py index 6a195abb..6ffe0647 100644 --- a/src/underworld3/discretisation.py +++ b/src/underworld3/discretisation.py @@ -2,6 +2,7 @@ from enum import Enum import os +from mpi4py.MPI import Info import numpy import sympy from sympy.matrices.expressions.blockmatrix import bc_dist @@ -11,8 +12,10 @@ from underworld3.utilities._api_tools import Stateful from underworld3.utilities._api_tools import uw_object +from underworld3.utilities._utils import gather_data from underworld3.coordinates import CoordinateSystem, CoordinateSystemType + from underworld3.cython import petsc_discretisation import underworld3.timing as timing @@ -165,6 +168,8 @@ def __init__( filename=None, refinement=None, refinement_callback=None, + coarsening=None, + coarsening_callback=None, return_coords_to_bounds=None, boundaries=None, boundary_normals=None, @@ -179,11 +184,22 @@ def __init__( comm = PETSc.COMM_WORLD if isinstance(plex_or_meshfile, PETSc.DMPlex): + isDistributed = plex_or_meshfile.isDistributed() if verbose and uw.mpi.rank == 0: - print(f"Constructing UW mesh from DMPlex object", flush=True) + print( + f"Constructing UW mesh from DMPlex object (distributed == {isDistributed})", + flush=True, + ) + if verbose: + plex_or_meshfile.view() + name = "plexmesh" self.dm = plex_or_meshfile self.sf0 = None # Should we build one ? + + # Don't set from options — don't want to redistribute the dm + # or change any settings as this should be left to the user + else: comm = kwargs.get("comm", PETSc.COMM_WORLD) name = plex_or_meshfile @@ -242,6 +258,11 @@ def __init__( f.close() + # This needs to be done when reading a dm from a checkpoint + # or building from an imported mesh format + + self.dm.setFromOptions() + else: raise RuntimeError( "Mesh file %s has unknown format '%s'." 
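As context for the `Mesh.__init__` changes above: the file-based construction path that this hunk touches reduces to a few petsc4py calls. A hedged sketch (the `.msh` filename is illustrative only):

```python
from petsc4py import PETSc

# Read a gmsh file into a DMPlex, as the Mesh constructor does for ".msh" input
dm = PETSc.DMPlex().createFromFile("annulus.msh", comm=PETSc.COMM_WORLD)

# Per the comment added in the diff: setFromOptions() is only applied to newly
# created DMs (file import / checkpoint), never to a user-supplied DMPlex,
# so that user settings and distribution are not silently overridden
dm.setFromOptions()

if not dm.isDistributed():
    dm.distribute()
```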
@@ -276,7 +297,10 @@ class replacement_boundaries(Enum): # options.delValue("dm_plex_gmsh_mark_vertices") # options.delValue("dm_plex_gmsh_multiple_tags") # options.delValue("dm_plex_gmsh_use_regions") - self.dm.setFromOptions() + # + + # Only for newly created dm (from mesh files) + # self.dm.setFromOptions() # uw.adaptivity._dm_stack_bcs(self.dm, self.boundaries, "UW_Boundaries") @@ -315,8 +339,11 @@ class replacement_boundaries(Enum): uw.mpi.barrier() ## --- + ## Note - coarsening callback is tricky because the coarse meshes do not have the labels + ## self.refinement_callback = refinement_callback + self.coarsening_callback = coarsening_callback self.name = name self.sf1 = None self.return_coords_to_bounds = return_coords_to_bounds @@ -328,6 +355,10 @@ class replacement_boundaries(Enum): f"Mesh refinement levels: {refinement}", flush=True, ) + print( + f"Mesh coarsening levels: {coarsening}", + flush=True, + ) uw.mpi.barrier() @@ -359,6 +390,7 @@ class replacement_boundaries(Enum): self.dm_h = self.dm_hierarchy[-1] self.dm_h.setName("uw_hierarchical_dm") + # Is this needed here, after the above calls ? if callable(refinement_callback): for dm in self.dm_hierarchy: refinement_callback(dm) @@ -366,6 +398,31 @@ class replacement_boundaries(Enum): # Single level equivalent dm (needed for aux vars ?? Check this - LM) self.dm = self.dm_h.clone() + elif not coarsening is None and coarsening > 0: + + # Does this have any effect on a coarsening strategy ? + self.dm.setRefinementUniform() + + if not self.dm.isDistributed(): + self.dm.distribute() + + self.dm_hierarchy = [self.dm] + for i in range(coarsening): + dm_coarsened = self.dm_hierarchy[i].coarsen() + self.dm_hierarchy[i].setCoarseDM(dm_coarsened) + self.dm_hierarchy.append(dm_coarsened) + + # Coarsest mesh should be first in the hierarchy to be consistent + # with the way we manage refinements + self.dm_hierarchy.reverse() + + self.dm_h = self.dm_hierarchy[-1] + self.dm_h.setName("uw_hierarchical_dm") + + # Single level equivalent dm (needed for aux vars ?? Check this - LM) + self.dm = self.dm_h.clone() + # self.dm_hierarchy[0].view() + else: if not self.dm.isDistributed(): self.dm.distribute() @@ -454,6 +511,30 @@ class replacement_boundaries(Enum): self.degree = degree self.qdegree = qdegree + # Populate the element information for this mesh. This is intended to be + # human readable because the mesh is quite simple: either quads / tris in 2D + # tetrahedra / hexahedra in 3D + + from dataclasses import dataclass + + @dataclass + class ElementInfo: + type: str + entities: tuple + face_entities: tuple + + if self.dm.isSimplex(): + if self.dim == 2: + self._element = ElementInfo("triangle", (1, 3, 3), (0, 1, 2)) + else: + self._element = ElementInfo("tetrahedron", (1, 4, 6, 4), (0, 1, 3, 3)) + else: + if self.dim == 2: + self._element = ElementInfo("quadrilateral", (1, 4, 4), (0, 1, 2)) + else: + self._element = ElementInfo("hexahedron", (1, 6, 12, 8), (0, 1, 4, 4)) + + # Navigation / coordinates etc self.nuke_coords_and_rebuild() if verbose and uw.mpi.rank == 0: @@ -508,40 +589,56 @@ def cdim(self) -> int: """ return self.dm.getCoordinateDim() + @property + def element(self) -> dict: + """ + The element information of the mesh (no mixed meshes in uw3) so this + applies to every cell of the `mesh dmplex object` + """ + + return self._element + def view(self, level=0): - ''' + """ Displays mesh information at different levels. - + Parameters ---------- - level : int (0 default) - The display level. 
+ level : int (0 default) + The display level. 0, for basic mesh information (variables and boundaries), while level=1 displays detailed mesh information (including PETSc information) - ''' + """ import numpy as np - if level==0: + + if level == 0: if uw.mpi.rank == 0: print(f"\n") print(f"Mesh # {self.instance}: {self.name}\n") uw.visualisation.plot_mesh(self) - #Total number of cells - nstart, nend=self.dm.getHeightStratum(0) - num_cells=nend-nstart + # Total number of cells + nstart, nend = self.dm.getHeightStratum(0) + num_cells = nend - nstart print(f"Number of cells: {num_cells}\n") if len(self.vars) > 0: - print(f"| Variable Name | component | degree | type |") - print(f"| ---------------------------------------------------------- |") + print( + f"| Variable Name | component | degree | type |" + ) + print( + f"| ---------------------------------------------------------- |" + ) for vname in self.vars.keys(): v = self.vars[vname] print( f"| {v.clean_name:<20}|{v.num_components:^10} |{v.degree:^7} | {v.vtype.name:^15} |" ) - print(f"| ---------------------------------------------------------- |") + print( + f"| ---------------------------------------------------------- |" + ) print("\n", flush=True) else: print(f"No variables are defined on the mesh\n", flush=True) @@ -576,13 +673,6 @@ def view(self, level=0): flush=True, ) - # ## PETSc marked boundaries: - # l = self.dm.getLabel("All_Boundaries") - # if l: - # i = l.getStratumSize(1001) - # else: - # i = 0 - ii = uw.utilities.gather_data(np.array([i]), dtype="int") if uw.mpi.rank == 0: @@ -614,27 +704,33 @@ def view(self, level=0): # self.dm.view() print(f"Use view(1) to view detailed mesh information.\n") - elif level==1: + elif level == 1: if uw.mpi.rank == 0: print(f"\n") print(f"Mesh # {self.instance}: {self.name}\n") uw.visualisation.plot_mesh(self) - #Total number of cells - nstart, nend=self.dm.getHeightStratum(0) - num_cells=nend-nstart + # Total number of cells + nstart, nend = self.dm.getHeightStratum(0) + num_cells = nend - nstart print(f"Number of cells: {num_cells}\n") if len(self.vars) > 0: - print(f"| Variable Name | component | degree | type |") - print(f"| ---------------------------------------------------------- |") + print( + f"| Variable Name | component | degree | type |" + ) + print( + f"| ---------------------------------------------------------- |" + ) for vname in self.vars.keys(): v = self.vars[vname] print( f"| {v.clean_name:<20}|{v.num_components:^10} |{v.degree:^7} | {v.vtype.name:^15} |" ) - print(f"| ---------------------------------------------------------- |") + print( + f"| ---------------------------------------------------------- |" + ) print("\n", flush=True) else: print(f"No variables are defined on the mesh\n", flush=True) @@ -707,7 +803,9 @@ def view(self, level=0): self.dm.view() else: - print(f"\n Please use view() or view(0) for default view and view(1) for a detailed view of the mesh.") + print( + f"\n Please use view() or view(0) for default view and view(1) for a detailed view of the mesh." + ) def view_parallel(self): """ @@ -764,44 +862,6 @@ def view_parallel(self): ## Information on the mesh DM # self.dm.view() - # This only works for local - we can't access global information' - # and so this is not a suitable function for use during advection - # - # def _return_coords_to_bounds(self, coords, meshVar=None): - # """ - # Restore the provided coordinates to the interior of the domain. 
- # The default behaviour is to find the nearest node in the kdtree to each - # coordinate and use that value. If a meshVar is provided, we can use the nearest node - # for that discretisation instead. - - # This can be over-ridden for specific meshes - # (e.g. periodic) where a more appropriate choice is available. - # """ - - # import numpy as np - - # if meshVar is None: - # target_coords = self.data - # else: - # target_coords = meshVar.coords - - # ## Find which coords are invalid - - # invalid = self.get_closest_local_cells(coords) == -1 - - # if np.count_nonzero(invalid) == 0: - # return coords - - # print(f"{uw.mpi.rank}: Number of invalid coords {np.count_nonzero(invalid)}") - - # kdt = uw.kdtree.KDTree(target_coords) - # idx , _ , _ = kdt.find_closest_point(coords[invalid]) - - # valid_coords = coords.copy() - # valid_coords[invalid] = target_coords[idx] - - # return valid_coords - def clone_dm_hierarchy(self): """ Clone the dm hierarchy on the mesh @@ -1195,7 +1255,7 @@ def write_timestep( save_location = ( output_base_name + f".proxy.{svar.clean_name}.{index:05}.h5" ) - svar.write(save_location) + svar.write_proxy(save_location) if uw.mpi.rank == 0: checkpoint_xdmf( @@ -1483,8 +1543,114 @@ def _get_coords_for_basis(self, degree, continuous): return arrcopy + def _build_kd_tree_index_DS(self): + + if hasattr(self, "_index") and self._index is not None: + return + + # Build this from the PETScDS rather than the SWARM + + centroids = self._get_coords_for_basis(0, False) + index_coords = self._get_coords_for_basis(2, False) + + points_per_cell = index_coords.shape[0] // centroids.shape[0] + + cell_id = numpy.empty(index_coords.shape[0]) + for i in range(cell_id.shape[0]): + cell_id[i] = i // points_per_cell + + self._indexCoords = index_coords + self._index = uw.kdtree.KDTree(self._indexCoords) + # self._index.build_index() + self._indexMap = numpy.array(cell_id, dtype=numpy.int64) + + return + def _build_kd_tree_index(self): + if hasattr(self, "_index") and self._index is not None: + return + + dim = self.dim + # def mesh_face_skeleton_kdtree(mesh): + + cStart, cEnd = self.dm.getHeightStratum(0) + fStart, fEnd = self.dm.getHeightStratum(1) + pStart, pEnd = self.dm.getDepthStratum(0) + cell_num_faces = self.element.entities[1] + cell_num_points = self.element.entities[self.dim] + face_num_points = self.element.face_entities[self.dim] + + control_points_list = [] + control_points_cell_list = [] + + for cell, cell_id in enumerate(range(cStart, cEnd)): + + cell_faces = self.dm.getCone(cell_id) + points = self.dm.getTransitiveClosure(cell_id)[0][-cell_num_points:] + cell_point_coords = self.data[points - pStart] + cell_centroid = cell_point_coords.mean(axis=0) + + # for face in range(cell_num_faces): + + # points = self.dm.getTransitiveClosure(cell_faces[face])[0][ + # -face_num_points: + # ] + # point_coords = self.data[points - pStart] + + # face_centroid = point_coords.mean(axis=0) + # cell_centroid = cell_point_coords.mean(axis=0) + + # # 2D case + # if self.dim == 2: + # vector = point_coords[1] - point_coords[0] + # normal = numpy.array((-vector[1], vector[0])) + + # # 3D simplex case (probably also OK for hexes) + # else: + # normal = numpy.cross( + # (point_coords[1] - point_coords[0]), + # (point_coords[2] - point_coords[0]), + # ) + + # inward_outward = numpy.sign(normal.dot(face_centroid - cell_centroid)) + # normal *= inward_outward / numpy.sqrt(normal.dot(normal)) + + # inside_control_point = -1e-3 * normal + face_centroid + + # 
control_points_list.append(inside_control_point)
+    #         control_points_cell_list.append(cell_id)
+    #         control_points_list.append(cell_centroid)
+    #         control_points_cell_list.append(cell_id)
+
+            # Add points near the cell vertices
+
+            for i in range(cell_point_coords.shape[0]):
+                control_points_list.append(
+                    0.99 * cell_point_coords[i] + 0.01 * cell_centroid
+                )
+                control_points_cell_list.append(cell_id)
+
+            # Add centroid
+            control_points_list.append(cell_centroid)
+            control_points_cell_list.append(cell_id)
+
+        self._indexCoords = numpy.array(control_points_list)
+        self._index = uw.kdtree.KDTree(self._indexCoords, leafsize=8)
+        # self._index.build_index()
+        self._indexMap = numpy.array(control_points_cell_list, dtype=numpy.int64)
+
+        # We don't need an indexMap for this one because there is only one point per cell
+        # and the returned kdtree value IS the index.
+        # Note: self._centroids is not yet defined:
+
+        self._centroid_index = uw.kdtree.KDTree(self._get_coords_for_basis(0, False))
+        # self._centroid_index.build_index()
+
+        return
+
+    def _build_kd_tree_index_PIC(self):
+
         if hasattr(self, "_index") and self._index is not None:
             return
 
@@ -1508,7 +1674,7 @@ def _build_kd_tree_index(self):
         # 3^dim or 4^dim pop is used. This number may need to be considered
         # more carefully, or possibly should be coded to be set dynamically.
 
-        tempSwarm.insertPointUsingCellDM(PETSc.DMSwarm.PICLayoutType.LAYOUT_GAUSS, 4)
+        tempSwarm.insertPointUsingCellDM(PETSc.DMSwarm.PICLayoutType.LAYOUT_GAUSS, 3)
 
         # We can't use our own populate function since this needs THIS kd_tree to exist
         # We will need to use a standard layout instead
@@ -1522,14 +1688,260 @@ def _build_kd_tree_index(self):
         self._indexCoords = PIC_coords.copy()
         self._index = uw.kdtree.KDTree(self._indexCoords)
         self._indexMap = numpy.array(PIC_cellid, dtype=numpy.int64)
+        # self._index.build_index()
+
+        # We don't need an indexMap for this one because there is only one point per cell
+        # and the returned kdtree value IS the index.
+        # Note: self._centroids is not yet defined:
+
+        self._centroid_index = uw.kdtree.KDTree(self._get_coords_for_basis(0, False))
+        # self._centroid_index.build_index()
 
         tempSwarm.restoreField("DMSwarmPIC_coor")
-        tempSwarm.restoreField("DMSwarm_cellid")
+        tempSwarm.restoreField("DMSwarm_cellid")
 
         # tempSwarm.destroy()
 
         return
 
+    # Note - need to add this to the mesh rebuilding triggers
+    def _mark_faces_inside_and_out(self):
+        """
+        Create a collection of control point pairs that are slightly inside
+        and slightly outside each mesh face (mirror images of each other). This
+        allows a fast lookup of whether we are on the inside or outside of the plane
+        defined by a face (i.e. same side or other side as the cell centroid). If we are inside
+        for all faces in a convex polyhedron, then we are inside the cell
+        """
+
+        if (
+            hasattr(self, "faces_inner_control_points")
+            and self.faces_inner_control_points is not None
+            and hasattr(self, "faces_outer_control_points")
+            and self.faces_outer_control_points is not None
+        ):
+            return
+
+        dim = self.dim
+        # def mesh_face_skeleton_kdtree(mesh):
+
+        cStart, cEnd = self.dm.getHeightStratum(0)
+        fStart, fEnd = self.dm.getHeightStratum(1)
+        pStart, pEnd = self.dm.getDepthStratum(0)
+        num_local_cells = self.dm.getHeightStratum(0)[1]
+        cell_num_faces = self.element.entities[1]
+        cell_num_points = self.element.entities[self.dim]
+        face_num_points = self.element.face_entities[self.dim]
+
+        # All elements in our mesh are a single type
+
+        mesh_cell_outer_control_points = numpy.ndarray(
+            shape=(cell_num_faces, num_local_cells, self.dim)
+        )
+        mesh_cell_inner_control_points = numpy.ndarray(
+            shape=(cell_num_faces, num_local_cells, self.dim)
+        )
+
+        for cell, cell_id in enumerate(range(cStart, cEnd)):
+            cell_faces = self.dm.getCone(cell_id)
+            points = self.dm.getTransitiveClosure(cell_id)[0][-cell_num_points:]
+            cell_point_coords = self.data[points - pStart]
+
+            for face in range(cell_num_faces):
+
+                points = self.dm.getTransitiveClosure(cell_faces[face])[0][
+                    -face_num_points:
+                ]
+                point_coords = self.data[points - pStart]
+
+                face_centroid = point_coords.mean(axis=0)
+                cell_centroid = cell_point_coords.mean(axis=0)
+
+                # 2D case
+                if self.dim == 2:
+                    vector = point_coords[1] - point_coords[0]
+                    normal = numpy.array((-vector[1], vector[0]))
+
+                # 3D simplex case (probably also OK for hexes)
+                else:
+                    normal = numpy.cross(
+                        (point_coords[1] - point_coords[0]),
+                        (point_coords[2] - point_coords[0]),
+                    )
+
+                inward_outward = numpy.sign(normal.dot(face_centroid - cell_centroid))
+                normal *= inward_outward / numpy.sqrt(normal.dot(normal))
+
+                outside_control_point = 1e-3 * normal + face_centroid
+                inside_control_point = -1e-3 * normal + face_centroid
+
+                mesh_cell_outer_control_points[face, cell, :] = outside_control_point
+                mesh_cell_inner_control_points[face, cell, :] = inside_control_point
+
+        self.faces_inner_control_points = mesh_cell_inner_control_points
+        self.faces_outer_control_points = mesh_cell_outer_control_points
+
+        return
+
+    def test_if_points_in_cells(self, points, cells):
+        """
+        Determine if the given points lie in the suggested cells.
+        Uses a mesh skeletonization array to determine whether the point is
+        within the convex polygon / polyhedron defined by a cell.
+
+        Exact if applied to a linear mesh, approximate otherwise.
+        """
+
+        self._mark_faces_inside_and_out()
+
+        assert points.shape[0] == cells.shape[0]
+
+        mesh = self
+
+        cStart, cEnd = self.dm.getHeightStratum(0)
+        num_cell_faces = self.dm.getConeSize(cStart)
+
+        inside = numpy.ones_like(cells, dtype=bool)
+        insiders = numpy.ndarray(shape=(cells.shape[0], num_cell_faces), dtype=bool)
+
+        for f in range(num_cell_faces):
+            control_points_o = self.faces_outer_control_points[f, cells]
+            control_points_i = self.faces_inner_control_points[f, cells]
+            inside = ((control_points_o - points) ** 2).sum(axis=1) - (
+                (control_points_i - points) ** 2
+            ).sum(axis=1) > 0
+            insiders[:, f] = inside[:]
+
+        return numpy.all(insiders, axis=1)
+
+    def _mark_local_boundary_faces_inside_and_out(self):
+        """
+        Create a collection of control point pairs that are slightly inside
+        and slightly outside each boundary-defining face (mirror images of each other). This
+        allows a fast lookup of whether we are on the inside or outside of the domain.
+        We cannot ensure convexity, so this is approximate when close to the boundary
+        """
+
+        if (
+            hasattr(self, "boundary_face_control_points_kdtree")
+            and self.boundary_face_control_points_kdtree is not None
+            and hasattr(self, "boundary_face_control_points_sign")
+            and self.boundary_face_control_points_sign is not None
+        ):
+            return
+
+        cStart, cEnd = self.dm.getHeightStratum(0)
+        fStart, fEnd = self.dm.getHeightStratum(1)
+        pStart, pEnd = self.dm.getDepthStratum(0)
+        cell_num_faces = self.element.entities[1]
+        cell_num_points = self.element.entities[self.dim]
+        face_num_points = self.element.face_entities[self.dim]
+
+        boundary_faces = []
+        for face in range(fStart, fEnd):
+            if self.dm.getJoin(face).shape[0] == 1:
+                boundary_faces.append(face)
+
+        boundary_faces = numpy.array(boundary_faces)
+
+        control_points_list = []
+        control_point_sign_list = []
+
+        for face in boundary_faces:
+            cell = self.dm.getJoin(face)[0]
+            points = self.dm.getTransitiveClosure(face)[0][-face_num_points:]
+            point_coords = self.data[points - pStart]
+            face_centroid = point_coords.mean(axis=0)
+            cell_centroid = self._centroids[cell - cStart]
+
+            # 2D case
+            if self.dim == 2:
+                vector = point_coords[1] - point_coords[0]
+                normal = numpy.array((-vector[1], vector[0]))
+
+            else:
+                # 3D simplex case (probably also OK for hexes)
+                normal = numpy.cross(
+                    (point_coords[1] - point_coords[0]),
+                    (point_coords[2] - point_coords[0]),
+                )
+
+            inward_outward = numpy.sign(normal.dot(face_centroid - cell_centroid))
+            normal *= inward_outward / numpy.sqrt(normal.dot(normal))
+
+            # Control points near centroid
+
+            outside_control_point = 1e-8 * normal + face_centroid
+            control_points_list.append(outside_control_point)
+            control_point_sign_list.append(-1)
+
+            inside_control_point = -1e-8 * normal + face_centroid
+            control_points_list.append(inside_control_point)
+            control_point_sign_list.append(1)
+
+            # Control points closer to face nodes
+            # (note: these are built from the face-node coordinates, point_coords,
+            #  not from the DM point indices held in `points`)
+
+            for pt in range(0, face_num_points):
+
+                outside_control_point = (
+                    1e-8 * normal + 0.8 * point_coords[pt] + 0.2 * face_centroid
+                )
+                control_points_list.append(outside_control_point)
+                control_point_sign_list.append(-1)
+
+                inside_control_point = (
+                    -1e-8 * normal + 0.8 * point_coords[pt] + 0.2 * face_centroid
+                )
+                control_points_list.append(inside_control_point)
+                control_point_sign_list.append(1)
+
+        control_point_kdtree = uw.kdtree.KDTree(numpy.array(control_points_list))
+        control_point_sign = numpy.array(control_point_sign_list)
+
+        self.boundary_face_control_points_kdtree = control_point_kdtree
+        self.boundary_face_control_points_sign = control_point_sign
+
+        return
+
+    def points_in_domain(self, points, strict_validation=True):
+        """
+        Determine if the given points lie in this domain.
+        Uses a mesh-boundary skeletonization array to determine whether the point is
+        inside the boundary or outside. If close to the boundary, it checks if points
+        are in a cell.
+
+        """
+
+        self._mark_local_boundary_faces_inside_and_out()
+
+        max_radius = self.get_max_radius()
+
+        if points.shape[0] == 0:
+            return False
+
+        dist2, closest_control_points_ext = (
+            self.boundary_face_control_points_kdtree.query(points, k=1, sqr_dists=True)
+        )
+        in_or_not = (
+            self.boundary_face_control_points_sign[closest_control_points_ext] > 0
+        )
+
+        ## This choice of distance needs some more thought
+
+        near_boundary = numpy.where(dist2 < max_radius**2)[0]
+        near_boundary_points = points[near_boundary]
+
+        in_or_not[near_boundary] = (
+            self.get_closest_local_cells(near_boundary_points) != -1
+        )
+
+        if strict_validation:
+            chosen_ones = numpy.where(in_or_not == True)[0]
+            chosen_points = points[chosen_ones]
+            in_or_not[chosen_ones] = self.get_closest_local_cells(chosen_points) != -1
+
+        return in_or_not
+
     @timing.routine_timer_decorator
     def get_closest_cells(self, coords: numpy.ndarray) -> numpy.ndarray:
         """
@@ -1559,10 +1971,11 @@ def get_closest_cells(self, coords: numpy.ndarray) -> numpy.ndarray:
         self._build_kd_tree_index()
 
         if len(coords) > 0:
-            #closest_points, dist, found = self._index.find_closest_point(coords)
             dist, closest_points = self._index.query(coords, k=1)
             if np.any(closest_points > self._index.n):
-                raise RuntimeError("An error was encountered attempting to find the closest cells to the provided coordinates.")
+                raise RuntimeError(
+                    "An error was encountered attempting to find the closest cells to the provided coordinates."
+                )
         else:
             ### returns an empty array if no coords are on a proc
             closest_points, dist, found = False, False, numpy.array([None])
@@ -1595,23 +2008,51 @@ def get_closest_local_cells(self, coords: numpy.ndarray) -> numpy.ndarray:
         """
         import numpy as np
 
         # Create index if required
         self._build_kd_tree_index()
 
         if len(coords) > 0:
             dist, closest_points = self._index.query(coords, k=1)
             if np.any(closest_points > self._index.n):
-                raise RuntimeError("An error was encountered attempting to find the closest cells to the provided coordinates.")
+                raise RuntimeError(
+                    "An error was encountered attempting to find the closest cells to the provided coordinates."
+                )
         else:
-            return -1
+            return np.zeros((0,))
 
-        # This is tuned a little bit so that points on a single CPU are never lost
+        # We need to filter points that lie outside the mesh but
+        # still are allocated a nearby element by this distance-only check.
cells = self._indexMap[closest_points] - #invalid = ( - # dist > 0.1 * self._radii[cells] ** 2 # 2.5 * self._search_lengths[cells] - #) # 0.25 * self._radii[cells] ** 2 - #cells[invalid] = -1 + cStart, cEnd = self.dm.getHeightStratum(0) + + inside = self.test_if_points_in_cells(coords, cells) + cells[~inside] = -1 + lost_points = np.where(inside == False)[0] + + # Part 2 - try to find the lost points by walking nearby cells + + num_local_cells = self._centroid_index.data_pts.shape[0] + num_testable_neighbours = min(num_local_cells, 50) + + dist2, closest_centroids = self._centroid_index.query( + coords[lost_points], k=num_testable_neighbours, sqr_dists=True + ) + + # This number is close to the point-point coordination value in 3D unstructured + # grids (by inspection) + + for i in range(0, num_testable_neighbours): + + inside = self.test_if_points_in_cells( + coords[lost_points], closest_centroids[:, i] + ) + cells[lost_points[inside]] = closest_centroids[inside, i] + + if np.count_nonzero(cells == -1) == 0: + break return cells @@ -1619,7 +2060,6 @@ def _get_mesh_sizes(self, verbose=False): """ Obtain the (local) mesh radii and centroids using This routine is called when the mesh is built / rebuilt - """ centroids = self._get_coords_for_basis(0, False) @@ -1638,8 +2078,7 @@ def _get_mesh_sizes(self, verbose=False): cell_points = self.dm.getTransitiveClosure(cell)[0][-cell_num_points:] cell_coords = self.data[cell_points - pStart] - #_, distsq, _ = centroids_kd_tree.find_closest_point(cell_coords) - distsq, _ = centroids_kd_tree.query(cell_coords, k=1) + distsq, _ = centroids_kd_tree.query(cell_coords, k=1, sqr_dists=True) cell_length[cell] = np.sqrt(distsq.max()) cell_r[cell] = np.sqrt(distsq.mean()) @@ -1669,6 +2108,14 @@ def _get_mesh_centroids(self): return centroids + def _get_domain_centroids(self): + + import numpy as np + + domain_centroid = self._centroids.mean(axis=0) + all_centroids = gather_data(domain_centroid, bcast=True).reshape(-1, self.dim) + return all_centroids + def get_min_radius_old(self) -> float: """ This method returns the global minimum distance from any cell centroid to a face. @@ -1720,13 +2167,6 @@ def get_max_radius(self) -> float: return all_max_radii.max() - # def get_boundary_subdm(self) -> PETSc.DM: - # """ - # This method returns the boundary subdm that wraps DMPlexCreateSubmesh - # """ - # from underworld3.petsc_discretisation import petsc_create_surface_submesh - # return petsc_create_surface_submesh(self, "Boundary", 666, ) - # This should be deprecated in favour of using integrals def stats(self, uw_function, uw_meshVariable, basis=None): """ @@ -2215,7 +2655,9 @@ def __getitem__(self, indices): # that is stable when used for EXTRAPOLATION but # not accurate. - def rbf_interpolate(self, new_coords, meth=0, p=2, verbose=False, nnn=None, rubbish=None): + def rbf_interpolate( + self, new_coords, meth=0, p=2, verbose=False, nnn=None, rubbish=None + ): # An inverse-distance mapping is quite robust here ... 
as long # as long we take care of the case where some nodes coincide (likely if used mesh2mesh) @@ -2234,7 +2676,9 @@ def rbf_interpolate(self, new_coords, meth=0, p=2, verbose=False, nnn=None, rubb print("Building K-D tree", flush=True) mesh_kdt = uw.kdtree.KDTree(self.coords) - values = mesh_kdt.rbf_interpolator_local(new_coords, D, nnn, p=p, verbose=verbose) + values = mesh_kdt.rbf_interpolator_local( + new_coords, D, nnn, p=p, verbose=verbose + ) del mesh_kdt return values diff --git a/src/underworld3/function/__init__.py b/src/underworld3/function/__init__.py index 1cb6d378..068f16ad 100644 --- a/src/underworld3/function/__init__.py +++ b/src/underworld3/function/__init__.py @@ -2,9 +2,10 @@ from ._function import ( UnderworldFunction, evaluate, - evalf, dm_swarm_get_migrate_type, dm_swarm_set_migrate_type, + _dmswarm_get_migrate_type, + _dmswarm_set_migrate_type, # evalf_at_coords, # _interpolate_all_vars_on_mesh, ) diff --git a/src/underworld3/function/_function.pyx b/src/underworld3/function/_function.pyx index 506e5d29..1235c021 100644 --- a/src/underworld3/function/_function.pyx +++ b/src/underworld3/function/_function.pyx @@ -22,6 +22,10 @@ cdef extern from "petsc.h" nogil: ctypedef enum DMSwarmMigrateType: pass +cdef extern from "petsc.h" nogil: + ctypedef enum DMSwarmType: + pass + cdef extern from "petsc_tools.h" nogil: PetscErrorCode DMInterpolationSetUp_UW(DMInterpolationInfo ipInfo, PetscDM dm, int petscbool, int petscbool, size_t* owning_cell) PetscErrorCode DMInterpolationEvaluate_UW(DMInterpolationInfo ipInfo, PetscDM dm, PetscVec x, PetscVec v) @@ -165,6 +169,146 @@ def evaluate( expr, coord_sys=None, other_arguments=None, simplify=True, + verbose=False, + evalf=False, + rbf=False,): + """ + Evaluate a given expression at a list of coordinates. + + Note it is not efficient to call this function to evaluate an expression at + a single coordinate. Instead the user should provide a numpy array of all + coordinates requiring evaluation. + + Parameters + ---------- + expr: sympy.Basic + Sympy expression requiring evaluation. + coords: numpy.ndarray + Numpy array of coordinates to evaluate expression at. + coord_sys: mesh.N vector coordinate system + + other_arguments: dict + Dictionary of other arguments necessary to evaluate function. + Not yet implemented. + + + """ + + + # Extract all the mesh/swarm variables in the expression and check that they all live on the + # same mesh. If not, this evaluation is not valid. + + varfns = set() + def unpack_var_fns(exp): + + if isinstance(exp,uw.function._function.UnderworldAppliedFunctionDeriv): + raise RuntimeError("Derivative functions are not handled in evaluations, a projection should be used first to create a mesh Variable.") + + isUW = isinstance(exp, uw.function._function.UnderworldAppliedFunction) + isMatrix = isinstance(exp, sympy.Matrix) + + if isUW: + varfns.add(exp) + if exp.args != exp.meshvar().mesh.r: + raise RuntimeError(f"Mesh Variable functions can only be evaluated as functions of '{exp.meshvar().mesh.r}'.\n" + f"However, mesh variable '{exp.meshvar().name}' appears to take the argument {exp.args}." 
) + elif isMatrix: + for sub_exp in exp: + if isinstance(sub_exp, uw.function._function.UnderworldAppliedFunction): + varfns.add(sub_exp) + else: + for arg in sub_exp.args: + unpack_var_fns(arg) + + else: + # Recursively search for more functions + for arg in exp.args: + unpack_var_fns(arg) + + return + + unpack_var_fns(expr) + + + if verbose: + for varfn in varfns: + print(f"MeshVariable functions in evaluation: {varfns}") + + + # Check the same mesh is used for all mesh variables + mesh = None + for varfn in varfns: + if mesh is None: + mesh = varfn.meshvar().mesh + else: + if mesh != varfn.meshvar().mesh: + raise RuntimeError("In this expression there are functions defined on different meshes. This is not supported") + + + # If there are no mesh variables, then we have no need of a mesh to + # help us to evaluate the expression. The evalf flag will force rbf_evaluation and + # does not need mesh information + + if evalf==True or rbf==True: + return rbf_evaluate( expr, + coords, + coord_sys, + mesh, + simplify=simplify, + verbose=verbose, + ) + + + + if mesh is None: + in_or_not = np.full((coords.shape[0]), True, dtype=bool ) + return petsc_interpolate( expr, + coords[in_or_not], + coord_sys, + mesh, + simplify=simplify, + verbose=verbose, ) + + + + else: + in_or_not = mesh.points_in_domain(coords, strict_validation=False) + evaluation_interior = petsc_interpolate( expr, + coords[in_or_not], + coord_sys, + mesh, + simplify=simplify, + verbose=verbose, ) + + if np.count_nonzero(in_or_not == False) > 0: + evaluation_exterior = rbf_evaluate( expr, + coords[~in_or_not], + coord_sys, + mesh, + simplify=simplify, + verbose=verbose, ) + else: + evaluation_exterior = None + + + if len(evaluation_interior.shape) == 1: + evaluation = np.empty(shape=(in_or_not.shape[0],)) + else: + evaluation = np.empty(shape=(in_or_not.shape[0],evaluation_interior.shape[1] )) + + evaluation[in_or_not] = evaluation_interior + evaluation[~in_or_not] = evaluation_exterior + + + return evaluation + + +def petsc_interpolate( expr, + np.ndarray coords=None, + coord_sys=None, + mesh=None, + other_arguments=None, + simplify=True, verbose=False, ): """ Evaluate a given expression at a list of coordinates. @@ -244,88 +388,20 @@ def evaluate( expr, if verbose and uw.mpi.rank==0: print(f"Expression to be evaluated: {expr}") - # 1. Extract UW variables. - # Let's first collect all the meshvariables present in the expression and check - # them for validity. This is applied recursively across the expression - # Recurse the expression tree. - - # varfns = set() - # def get_var_fns(exp): - - # if isinstance(exp,uw.function._function.UnderworldAppliedFunctionDeriv): - # raise RuntimeError("Derivative functions are not handled in evaluations, a projection should be used first to create a mesh Variable.") - - # isUW = isinstance(exp, uw.function._function.UnderworldAppliedFunction) - # if isUW: - # varfns.add(exp) - # if exp.args != exp.meshvar().mesh.r: - # raise RuntimeError(f"Mesh Variable functions can only be evaluated as functions of '{exp.meshvar().mesh.r}'.\n" - # f"However, mesh variable '{exp.meshvar().name}' appears to take the argument {exp.args}." ) - # else: - # # Recurse. 
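The tail of `evaluate` above stitches two evaluations together through a boolean mask: points inside the domain go to the PETSc interpolant, the rest fall back to the RBF evaluator, and the partial results are recombined. A minimal sketch of that pattern, with placeholder evaluator callables rather than the Underworld implementations:

```python
import numpy as np

def evaluate_split(coords, inside, eval_interior, eval_exterior):
    interior = eval_interior(coords[inside])
    exterior = eval_exterior(coords[~inside]) if np.count_nonzero(~inside) else None

    # Size the output from the mask, not from either partial result.
    if interior.ndim == 1:
        out = np.empty(coords.shape[0])
    else:
        out = np.empty((coords.shape[0], interior.shape[1]))
    out[inside] = interior
    if exterior is not None:
        out[~inside] = exterior
    return out

coords = np.linspace(0.0, 2.0, 9).reshape(-1, 1)
inside = coords[:, 0] <= 1.0  # stand-in for mesh.points_in_domain(coords)
print(evaluate_split(coords, inside,
                     lambda c: c[:, 0] ** 2,          # "interior" evaluator
                     lambda c: 2.0 * c[:, 0] - 1.0))  # "exterior" fallback
```

Allocating the output from the mask size is what lets the two branches each see only the points they are responsible for.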
- # for arg in exp.args: - # get_var_fns(arg) - - # return - - # get_var_fns(expr) - - varfns = set() - def unpack_var_fns(exp): - - if isinstance(exp,uw.function._function.UnderworldAppliedFunctionDeriv): - raise RuntimeError("Derivative functions are not handled in evaluations, a projection should be used first to create a mesh Variable.") - - isUW = isinstance(exp, uw.function._function.UnderworldAppliedFunction) - isMatrix = isinstance(exp, sympy.Matrix) - - if isUW: - varfns.add(exp) - if exp.args != exp.meshvar().mesh.r: - raise RuntimeError(f"Mesh Variable functions can only be evaluated as functions of '{exp.meshvar().mesh.r}'.\n" - f"However, mesh variable '{exp.meshvar().name}' appears to take the argument {exp.args}." ) - elif isMatrix: - for sub_exp in exp: - if isinstance(sub_exp, uw.function._function.UnderworldAppliedFunction): - varfns.add(sub_exp) - else: - # Recurse. - for arg in exp.args: - unpack_var_fns(arg) - - return - - unpack_var_fns(expr) - - # Check the same mesh is used for all mesh variables - mesh = None - for varfn in varfns: - - if mesh is None: - mesh = varfn.meshvar().mesh - #mesh = varfn.mesh - else: - if mesh != varfn.meshvar().mesh: - raise RuntimeError("In this expression there are functions defined on different meshes. This is not supported") - - if verbose: - print(f"Mesh for evaluations: {mesh.name}", flush=True) - - - - if (len(varfns)==0) and (coords is None): - raise RuntimeError("Interpolation coordinates not specified by supplied expression contains mesh variables.\n" - "Mesh variables can only be interpolated at coordinates.") - - if mesh is not None: - varfns = set() - for var in mesh.vars.values(): - for subvar in var.sym_1d: - varfns.add(subvar) + # if (len(varfns)==0) and (coords is None): + # raise RuntimeError("Interpolation coordinates not specified by supplied expression contains mesh variables.\n" + # "Mesh variables can only be interpolated at coordinates.") # Create dictionary which creates a per mesh list of vars. # Usually there will only be a single mesh, but this allows for the # more general situation. + # + + varfns = set() + if mesh is not None and mesh.vars is not None: + for v in mesh.vars.values(): + for sub in v: + varfns.add(sub.sym) from collections import defaultdict interpolant_varfns = defaultdict(lambda : []) @@ -333,7 +409,6 @@ def evaluate( expr, for varfn in varfns: if verbose and uw.mpi.rank == 0: print(f"Varfn for interpolation: {varfn}") - interpolant_varfns[varfn.meshvar().mesh].append(varfn) @@ -361,7 +436,7 @@ def evaluate( expr, # by a simple coordinate hash. We kill this in the # .access for mesh variables but this is prone to mistakes - if coord_hash == mesh._evaluation_hash: + if False and coord_hash == mesh._evaluation_hash: # if uw.mpi.rank == 0: # print("Using uw.evaluation cache", flush=True) return mesh._evaluation_interpolated_results @@ -428,7 +503,7 @@ def evaluate( expr, ierr = DMInterpolationDestroy(&ipInfo);CHKERRQ(ierr) # Create map between array slices and variable functions - + # varfns_arrays = {} for varfn in varfns: var = varfn.meshvar() @@ -452,7 +527,7 @@ def evaluate( expr, return varfns_arrays - # Get map of all variable functions across all meshes. 
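The `if False and coord_hash == mesh._evaluation_hash` guard above switches off a results cache keyed on a hash of the evaluation coordinates. For reference, a minimal sketch of that caching pattern (hypothetical names; the real cache lives on the mesh object):

```python
import hashlib
import numpy as np

_evaluation_cache = {}

def cached_interpolate(coords, interpolate):
    # Key on the raw bytes of the coordinate array.
    key = hashlib.sha1(np.ascontiguousarray(coords).tobytes()).hexdigest()
    if key not in _evaluation_cache:
        _evaluation_cache[key] = interpolate(coords)
    return _evaluation_cache[key]
```

The weakness the surrounding comments hint at: the key sees only the coordinates, so the cache can return stale values if the mesh variables change between calls that use the same points.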
+ # Get map of all variable functions interpolated_results = {} for key, vals in interpolant_varfns.items(): interpolated_var_values = interpolate_vars_on_mesh(vals, coords) @@ -466,6 +541,7 @@ def evaluate( expr, for varfn in interpolated_results.keys(): randstr = ''.join(random.choices(string.ascii_uppercase, k = 5)) varfns_symbols[varfn] = sympy.Symbol(randstr) + # subs variable fns in expression for symbols subbedexpr = expr.subs(varfns_symbols) @@ -491,6 +567,8 @@ def evaluate( expr, elif isinstance(subbedexpr, sympy.vector.Dyadic): subbedexpr = subbedexpr.to_matrix(N)[0:dim,0:dim] + + lambfn = lambdify( (r, varfns_symbols.values()), subbedexpr ) # Leave out modules. This is equivalent to SYMPY_DECIDE and can then include scipy if available @@ -540,9 +618,10 @@ evaluate = timing.routine_timer_decorator(routine=evaluate, class_name="Function ### ------------------------------ -def evalf( expr, +def rbf_evaluate( expr, coords=None, coord_sys=None, + mesh=None, other_arguments=None, verbose=False, simplify=True,): @@ -590,13 +669,16 @@ def evalf( expr, """ + ## These checks should be in the calling `evaluate` function + if not (isinstance( expr, sympy.Basic ) or isinstance( expr, sympy.Matrix ) ): raise RuntimeError("`evaluate()` function parameter `expr` does not appear to be a sympy expression.") sympy.core.cache.clear_cache() if uw.function.fn_is_constant_expr(expr): - return uw.function.expressions.unwrap(expr, keep_constants=False) + constant_value = uw.function.expressions.unwrap(expr, keep_constants=False) + return np.multiply.outer(np.ones(coords.shape[0]), np.array(constant_value, dtype=float).reshape(-1)) if (not coords is None) and not isinstance( coords, np.ndarray ): raise RuntimeError("`evaluate()` function parameter `input` does not appear to be a numpy array.") @@ -619,59 +701,23 @@ def evalf( expr, if simplify: expr = sympy.simplify(expr) - # 1. Extract UW variables. - - # Let's first collect all the meshvariables present in the expression and check - # them for validity. This is applied recursively across the expression - # Recurse the expression tree. + # 2. Evaluate all mesh variables - there is no real + # computational benefit in interpolating a subset. + # varfns = set() - def unpack_var_fns(exp): + if mesh is not None and mesh.vars is not None: + for v in mesh.vars.values(): + for sub in v: + varfns.add(sub.sym) - if isinstance(exp,uw.function._function.UnderworldAppliedFunctionDeriv): - raise RuntimeError("Derivative functions are not handled in evaluations, a projection should be used first to create a mesh Variable.") - - isUW = isinstance(exp, uw.function._function.UnderworldAppliedFunction) - isMatrix = isinstance(exp, sympy.Matrix) - - if isUW: - varfns.add(exp) - if exp.args != exp.meshvar().mesh.r: - raise RuntimeError(f"Mesh Variable functions can only be evaluated as functions of '{exp.meshvar().mesh.r}'.\n" - f"However, mesh variable '{exp.meshvar().name}' appears to take the argument {exp.args}." ) - elif isMatrix: - for sub_exp in exp: - if isinstance(sub_exp, uw.function._function.UnderworldAppliedFunction): - varfns.add(sub_exp) - else: - # Recurse. - for arg in exp.args: - unpack_var_fns(arg) - - return - - unpack_var_fns(expr) - - mesh = None - for varfn in varfns: - - if mesh is None: - mesh = varfn.meshvar().mesh - else: - if mesh != varfn.meshvar().mesh: - raise RuntimeError("In this expression there are functions defined on different meshes. This is not supported") - - # 2. 
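The constant-expression branch of `rbf_evaluate` above now broadcasts the constant to one row per evaluation point, so callers can index the result like any other evaluation. A small demonstration of the `np.multiply.outer` shaping used there:

```python
import numpy as np

coords = np.zeros((5, 2))              # five evaluation points
constant_value = np.array([3.0, 4.0])  # a constant vector-valued expression

result = np.multiply.outer(np.ones(coords.shape[0]),
                           np.asarray(constant_value, dtype=float).reshape(-1))
print(result.shape)  # (5, 2): one copy of the constant per point
```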
Evaluate all mesh variables - there is no real - # computational benefit in interpolating a subset. # Get map of all variable functions (no cache) interpolated_results = {} - for varfn in varfns: parent, component = uw.discretisation.meshVariable_lookup_by_symbol(mesh, varfn) values = parent.rbf_interpolate(coords, nnn=mesh.dim+1)[:,component] interpolated_results[varfn] = values - if verbose: print(f"{varfn} = {parent.name}[{component}]") @@ -703,13 +749,6 @@ def evalf( expr, N = mesh.N r = N.base_scalars()[0:dim] - - # # This likely never applies any more - # if isinstance(subbedexpr, sympy.vector.Vector): - # subbedexpr = subbedexpr.to_matrix(N)[0:dim,0] - # elif isinstance(subbedexpr, sympy.vector.Dyadic): - # subbedexpr = subbedexpr.to_matrix(N)[0:dim,0:dim] - lambfn = lambdify( (r, varfns_symbols.values()), subbedexpr ) # 5. Eval generated lambda expression @@ -751,11 +790,37 @@ def evalf( expr, # Note that we don't use the @decorator here so that # we can pass in the `class_name` parameter. -evalf = timing.routine_timer_decorator(routine=evalf, class_name="Function") +rbf_evaluate = timing.routine_timer_decorator(routine=rbf_evaluate, class_name="Function") + +## Not sure these belong with the uw function cython def dm_swarm_get_migrate_type(swarm): - cdef DM dm = swarm.dm + # cdef DM dm = swarm.dm + # cdef PetscErrorCode ierr + # cdef DMSwarmMigrateType mtype + + # ierr = DMSwarmGetMigrateType(dm.dm, &mtype); CHKERRQ(ierr) + + mtype = _dmswarm_get_migrate_type(swarm.dm) + + return mtype + +def dm_swarm_set_migrate_type(swarm, mtype:PETSc.DMSwarm.MigrateType): + + _dmswarm_set_migrate_type(swarm.dm, mtype) + + # cdef DM dm = swarm.dm + # cdef PetscErrorCode ierr + # cdef DMSwarmMigrateType mig = mtype + + # ierr = DMSwarmSetMigrateType(dm.dm, mig); CHKERRQ(ierr) + + return + +def _dmswarm_get_migrate_type(sdm): + + cdef DM dm = sdm cdef PetscErrorCode ierr cdef DMSwarmMigrateType mtype @@ -763,9 +828,9 @@ def dm_swarm_get_migrate_type(swarm): return mtype -def dm_swarm_set_migrate_type(swarm, mtype:PETsc.DMSwarm.MigrateType): +def _dmswarm_set_migrate_type(sdm, mtype:PETSc.DMSwarm.MigrateType): - cdef DM dm = swarm.dm + cdef DM dm = sdm cdef PetscErrorCode ierr cdef DMSwarmMigrateType mig = mtype diff --git a/src/underworld3/function/expressions.py b/src/underworld3/function/expressions.py index 630c1248..9f591db4 100644 --- a/src/underworld3/function/expressions.py +++ b/src/underworld3/function/expressions.py @@ -156,7 +156,7 @@ class UWexpression(uw_object, Symbol): """ _expr_count = 0 - _expr_names = [] + _expr_names = {} def __new__( cls, @@ -170,27 +170,26 @@ def __new__( instance_no = UWexpression._expr_count - if name in UWexpression._expr_names and _unique_name_generation == False: + ## if the expression already exists, do not replace it (but return the existing object instead) + + if name in UWexpression._expr_names.keys() and _unique_name_generation == False: warnings.warn( message=f"EXPRESSIONS {name}: Each expression should have a unique name - new expression was not generated", ) - return None + return UWexpression._expr_names[name] if name in UWexpression._expr_names and _unique_name_generation == True: - invisible = rf"\hspace{{ {instance_no/100}pt }}" unique_name = f"{{ {name} {invisible} }}" - else: unique_name = name - UWexpression._expr_names.append(unique_name) - obj = Symbol.__new__(cls, unique_name) obj._instance_no = instance_no obj._unique_name = unique_name obj._given_name = name + UWexpression._expr_names[unique_name] = obj UWexpression._expr_count += 1
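The `_expr_names` change above, from list to dict, turns name registration into interning: asking for an already-used name returns the original object rather than `None`. A stripped-down sketch of the pattern (warning and unique-name path omitted):

```python
import sympy

class InternedSymbol(sympy.Symbol):
    _registry = {}

    def __new__(cls, name):
        # A repeated name hands back the already-registered object.
        if name in cls._registry:
            return cls._registry[name]
        obj = sympy.Symbol.__new__(cls, name)
        cls._registry[name] = obj
        return obj

a = InternedSymbol("alpha")
b = InternedSymbol("alpha")
assert a is b  # one object per name
```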
return obj diff --git a/src/underworld3/kdtree.py b/src/underworld3/kdtree.py index 3bff6a8c..e396d50f 100644 --- a/src/underworld3/kdtree.py +++ b/src/underworld3/kdtree.py @@ -2,35 +2,45 @@ import underworld3 as uw import numpy as np + +## Note we are missing the function rbf_interpolator_local_to_kdtree +# +# Should be used in this swarm proxy function instead of hand-writing the code: +# +# def _rbf_reduce_to_meshVar(self, meshVar, verbose=False): +# """ +# This method updates a mesh variable for the current +# swarm & particle variable state by reducing the swarm to +# the nearest point for each particle + + # inherit from the pykdtree -class KDTree( _oKDTree ): - def rbf_interpolator_local(self, +class KDTree(_oKDTree): + def rbf_interpolator_local( + self, coords, data, - nnn = 4, - p = 2, - verbose = False, + nnn=4, + p=2, + verbose=False, ): return self.rbf_interpolator_local_from_kdtree( - coords, data, nnn, p, verbose, - ) + coords, + data, + nnn, + p, + verbose, + ) - # def find_closest_points( self, coords, nnn ) - - def rbf_interpolator_local_from_kdtree( - self, - coords, - data, - nnn, - p, - verbose): + # def find_closest_points( self, coords, nnn ) - ''' + def rbf_interpolator_local_from_kdtree(self, coords, data, nnn, p, verbose): + """ Performs an inverse distance (squared) mapping of data to the target `coords`. User can controls the algorithm by altering the number of neighbours used, `nnn` or the power factor `p` of the mapping weighting. - + Args: coords : ndarray, The target spatial coordinates to evaluate the data from. @@ -44,12 +54,16 @@ def rbf_interpolator_local_from_kdtree( The power index to calculate weights, ie. pow(distance, -p) verbose : bool, Print when mapping occurs - ''' + """ if coords.shape[1] != self.ndim: - raise RuntimeError(f"Interpolation coordinates dimensionality ({coords.shape[1]}) is different to kD-tree dimensionality ({self.ndim}).") + raise RuntimeError( + f"Interpolation coordinates dimensionality ({coords.shape[1]}) is different to kD-tree dimensionality ({self.ndim})." + ) if data.shape[0] != self.n: - raise RuntimeError(f"Data does not match kd-tree size array ({data.shape[0]} v ({self.n}))") + raise RuntimeError( + f"Data does not match kd-tree size array ({data.shape[0]} v ({self.n}))" + ) coords_contiguous = np.ascontiguousarray(coords) # query nnn points to the coords @@ -58,7 +72,9 @@ def rbf_interpolator_local_from_kdtree( distance_n, closest_n = self.query(coords_contiguous, k=nnn) if np.any(closest_n > self.n): - raise RuntimeError("Error in rbf_interpolator_local_from_kdtree - a nearest neighbour wasn't found") + raise RuntimeError( + "Error in rbf_interpolator_local_from_kdtree - a nearest neighbour wasn't found" + ) if verbose and uw.mpi.rank == 0: # For Debugging @@ -68,19 +84,22 @@ def rbf_interpolator_local_from_kdtree( if nnn == 1: # only use nearest neighbour raw data return data[closest_n] - + # can decompose weighting vecotrs as IDW is a linear relationship # build normalise weight vectors and multiply that with known data - epsilon = 1e-12 - weights = 1 / np.pow( epsilon+distance_n[:], p) + epsilon = 1e-12 + weights = 1 / np.power(epsilon + distance_n[:], p) n_weights = (weights.T / np.sum(weights, axis=1)).T - kdata = data[closest_n[:]] - + kdata = data[closest_n[:]] + # magic with einstein summation power - vals = np.einsum('sdc,sd->sc', kdata, n_weights) + vals = np.einsum("sdc,sd->sc", kdata, n_weights) # print(valz) if verbose and uw.mpi.rank == 0: print(f"Mapping values ... 
finished", flush=True) - + return vals + + +## NB the rbf interpolator TO kdtree is missing (and we need that one that we introduced to do a better job of mapping values from swarms to nodes for proxy variables) diff --git a/src/underworld3/kdtree_interface.hpp b/src/underworld3/kdtree_interface.hpp new file mode 100644 index 00000000..4fd28195 --- /dev/null +++ b/src/underworld3/kdtree_interface.hpp @@ -0,0 +1,105 @@ +#include "nanoflann.hpp" + +struct PointCloudAdaptor +{ + const double* obj; + int numpoints; + int dim; + + /// The constructor that sets the data set source + PointCloudAdaptor(const double* obj, int numpoints, int dim) : obj(obj), numpoints(numpoints), dim(dim) { }; + + // Must return the number of data points + inline size_t kdtree_get_point_count() const { return numpoints; }; + + // Returns the dim'th component of the idx'th point in the class: + // Since this is inlined and the "dim" argument is typically an immediate value, the + // "if/else's" are actually solved at compile time. + inline double kdtree_get_pt(const size_t idx, const size_t dof) const + { + return obj[idx*dim + dof]; + } + + // Optional bounding-box computation: return false to default to a standard bbox computation loop. + // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. + // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) + template + bool kdtree_get_bbox(BBOX& /*bb*/) const { return false; } + +}; // end of PointCloudAdaptor + +// construct a kd-tree index +typedef nanoflann::KDTreeSingleIndexAdaptor< + nanoflann::L2_Simple_Adaptor , + PointCloudAdaptor, + 2 /* dim */ + > tree_2d; +typedef nanoflann::KDTreeSingleIndexAdaptor< + nanoflann::L2_Simple_Adaptor , + PointCloudAdaptor, + 3 /* dim */ + > tree_3d; + +class KDTree_Interface +{ + + int dim=0; + PointCloudAdaptor* pca=nullptr; + tree_2d* index2d=nullptr; + tree_3d* index3d=nullptr; + public: + KDTree_Interface( const double* points, int numpoints, int dim ) : dim(dim), pca(new PointCloudAdaptor(points, numpoints, dim)) + { + if(dim==2) + { + index2d = new tree_2d(dim, *pca, nanoflann::KDTreeSingleIndexAdaptorParams(10 /* max leaf */) ); + index2d->buildIndex(); + } + else + { + index3d = new tree_3d(dim, *pca, nanoflann::KDTreeSingleIndexAdaptorParams(10 /* max leaf */) ); + index3d->buildIndex(); + } + }; + ~KDTree_Interface() + { + delete(index3d); + index3d=nullptr; + delete(index2d); + index3d=nullptr; + delete(pca); + pca=nullptr; + }; + void build_index() + { + if(dim==2) + index2d->buildIndex(); + else + index3d->buildIndex(); + }; + void find_closest_point( size_t num_coords, const double* coords, long unsigned int* indices, double* out_dist_sqr, bool* found ) + { + double dist; + nanoflann::KNNResultSet resultSet(1); + for (size_t item=0; itemfindNeighbors(resultSet, &coords[item*dim], nanoflann::SearchParams(10)); // note that I believe the value 10 here is ignored.. 
i'll retain it as it's used in the examples + else + founditem = index3d->findNeighbors(resultSet, &coords[item*dim], nanoflann::SearchParams(10)); // See line 561 of .hpp, not used but + // if you want to set other args you'll need to be aware of it + if (out_dist_sqr!=NULL) out_dist_sqr[item] = dist; + if ( found!=NULL) found[item] = founditem; + } + }; + + size_t knnSearch(const double* query_point, const size_t num_closest, long unsigned int* indices, double* out_dist_sqr ) { + if( dim == 2 ) + return index2d->knnSearch( query_point, num_closest, indices, out_dist_sqr ); + else + return index3d->knnSearch( query_point, num_closest, indices, out_dist_sqr ); + }; + +}; diff --git a/src/underworld3/meshing.py b/src/underworld3/meshing.py index 08a0abec..da5bed3f 100644 --- a/src/underworld3/meshing.py +++ b/src/underworld3/meshing.py @@ -1557,7 +1557,7 @@ def annulus_return_coords_to_bounds(coords): inside = Rsq < radiusInner**2 coords[outside, :] *= 0.99 * radiusOuter / (Rsq[outside] ** 0.5).reshape(-1, 1) - coords[inside, :] *= 1.01 * radiusInner / (Rsq[inside] ** 0.5).reshape(-1, 1) + coords[inside, :] *= 1.01 * radiusInner / (Rsq[inside] ** 0.5).reshape(-1, 1) return coords @@ -2338,7 +2338,6 @@ class boundaries(Enum): Upper = 3 Centre = 10 - if cellSize_Lower is None: cellSize_Lower = cellSize @@ -2548,7 +2547,6 @@ class boundaries(Enum): Lower = 1 Upper = 2 - r1 = radiusInner / np.sqrt(3) r2 = radiusOuter / np.sqrt(3) @@ -2750,8 +2748,8 @@ class boundary_normals(Enum): def RegionalSphericalBox( radiusOuter: float = 1.0, radiusInner: float = 0.547, - SWcorner=[-45,-45], - NEcorner=[+45,+45], + SWcorner=[-45, -45], + NEcorner=[+45, +45], numElementsLon: int = 5, numElementsLat: int = 5, numElementsDepth: int = 5, @@ -2760,6 +2758,7 @@ def RegionalSphericalBox( simplex: bool = False, filename=None, refinement=None, + coarsening=None, gmsh_verbosity=0, verbose=False, ): @@ -2778,46 +2777,53 @@ class boundaries(Enum): ln_min = np.radians(SWcorner[0]) ln_max = np.radians(NEcorner[0]) - p2 = ( radiusOuter * np.cos(lt_max) * np.cos(ln_max), - radiusOuter * np.cos(lt_max) * np.sin(ln_max), - radiusOuter * np.sin(lt_max) + p2 = ( + radiusOuter * np.cos(lt_max) * np.cos(ln_max), + radiusOuter * np.cos(lt_max) * np.sin(ln_max), + radiusOuter * np.sin(lt_max), ) - p3 = ( radiusOuter * np.cos(lt_max) * np.cos(ln_min), - radiusOuter * np.cos(lt_max) * np.sin(ln_min), - radiusOuter * np.sin(lt_max) - ) - - p4 = ( radiusOuter * np.cos(lt_min) * np.cos(ln_min), - radiusOuter * np.cos(lt_min) * np.sin(ln_min), - radiusOuter * np.sin(lt_min) + p3 = ( + radiusOuter * np.cos(lt_max) * np.cos(ln_min), + radiusOuter * np.cos(lt_max) * np.sin(ln_min), + radiusOuter * np.sin(lt_max), ) - p5 = ( radiusOuter * np.cos(lt_min) * np.cos(ln_max), - radiusOuter * np.cos(lt_min) * np.sin(ln_max), - radiusOuter * np.sin(lt_min) + p4 = ( + radiusOuter * np.cos(lt_min) * np.cos(ln_min), + radiusOuter * np.cos(lt_min) * np.sin(ln_min), + radiusOuter * np.sin(lt_min), ) - p6 = ( radiusInner * np.cos(lt_max) * np.cos(ln_max), - radiusInner * np.cos(lt_max) * np.sin(ln_max), - radiusInner * np.sin(lt_max) + p5 = ( + radiusOuter * np.cos(lt_min) * np.cos(ln_max), + radiusOuter * np.cos(lt_min) * np.sin(ln_max), + radiusOuter * np.sin(lt_min), ) - p7 = ( radiusInner * np.cos(lt_max) * np.cos(ln_min), - radiusInner * np.cos(lt_max) * np.sin(ln_min), - radiusInner * np.sin(lt_max) - ) + p6 = ( + radiusInner * np.cos(lt_max) * np.cos(ln_max), + radiusInner * np.cos(lt_max) * np.sin(ln_max), + radiusInner * np.sin(lt_max), + ) - 
p8 = ( radiusInner * np.cos(lt_min) * np.cos(ln_min), - radiusInner * np.cos(lt_min) * np.sin(ln_min), - radiusInner * np.sin(lt_min) + p7 = ( + radiusInner * np.cos(lt_max) * np.cos(ln_min), + radiusInner * np.cos(lt_max) * np.sin(ln_min), + radiusInner * np.sin(lt_max), ) - p9 = ( radiusInner * np.cos(lt_min) * np.cos(ln_max), - radiusInner * np.cos(lt_min) * np.sin(ln_max), - radiusInner * np.sin(lt_min) + p8 = ( + radiusInner * np.cos(lt_min) * np.cos(ln_min), + radiusInner * np.cos(lt_min) * np.sin(ln_min), + radiusInner * np.sin(lt_min), ) + p9 = ( + radiusInner * np.cos(lt_min) * np.cos(ln_max), + radiusInner * np.cos(lt_min) * np.sin(ln_max), + radiusInner * np.sin(lt_min), + ) # lat_south = np.radians(centralLatitude - latitudeExtent/2) # lat_north = np.radians(centralLatitude + latitudeExtent/2) @@ -2846,7 +2852,6 @@ class boundaries(Enum): center_point = gmsh.model.geo.addPoint(0.0, 0.0, 0.0, tag=1) - gmsh.model.geo.addPoint(p2[0], p2[1], p2[2], tag=2) gmsh.model.geo.addPoint(p3[0], p3[1], p3[2], tag=3) gmsh.model.geo.addPoint(p4[0], p4[1], p4[2], tag=4) @@ -2918,7 +2923,6 @@ class boundaries(Enum): gmsh.model.addPhysicalGroup(3, [1, 2], 99999) gmsh.model.setPhysicalName(3, 99999, "Elements") - ## We need to know which surface !! # for _, line in gmsh.model.get_entities(1): # gmsh.model.mesh.setTransfiniteCurve(line, numNodes=numElements + 1) @@ -2931,19 +2935,16 @@ class boundaries(Enum): gmsh.model.mesh.setTransfiniteCurve(3, numNodes=numElementsLon + 1) gmsh.model.mesh.setTransfiniteCurve(4, numNodes=numElementsLat + 1) - gmsh.model.mesh.setTransfiniteCurve(5, numNodes=numElementsLon + 1) gmsh.model.mesh.setTransfiniteCurve(6, numNodes=numElementsLat + 1) gmsh.model.mesh.setTransfiniteCurve(7, numNodes=numElementsLon + 1) gmsh.model.mesh.setTransfiniteCurve(8, numNodes=numElementsLat + 1) - gmsh.model.mesh.setTransfiniteCurve(9, numNodes=numElementsDepth + 1) gmsh.model.mesh.setTransfiniteCurve(10, numNodes=numElementsDepth + 1) gmsh.model.mesh.setTransfiniteCurve(11, numNodes=numElementsDepth + 1) gmsh.model.mesh.setTransfiniteCurve(12, numNodes=numElementsDepth + 1) - for _, surface in gmsh.model.get_entities(2): gmsh.model.mesh.setTransfiniteSurface(surface) if not simplex: @@ -2960,7 +2961,6 @@ class boundaries(Enum): gmsh.write(uw_filename) gmsh.finalize() - ## This needs a side-boundary capture routine as well def sphere_return_coords_to_bounds(coords): @@ -3021,6 +3021,8 @@ def spherical_mesh_refinement_callback(dm): boundary_normals=None, refinement=refinement, refinement_callback=spherical_mesh_refinement_callback, + coarsening=coarsening, + coarsening_callback=spherical_mesh_refinement_callback, coordinate_system_type=CoordinateSystemType.SPHERICAL, return_coords_to_bounds=sphere_return_coords_to_bounds, verbose=verbose, @@ -3233,7 +3235,6 @@ class boundaries(Enum): Centre = 1 Slices = 40 - meshRes = cellSize num_segments = numSegments @@ -3638,7 +3639,6 @@ class boundaries(Enum): Slices = 40 Null_Boundary = 666 - meshRes = cellSize num_segments = numSegments @@ -4057,15 +4057,15 @@ class boundary_normals_3D(Enum): os.makedirs(".meshes", exist_ok=True) if not simplex: # structuredQuadBoxIB - uw_filename = (f".meshes/uw_sqbIB_minC{minCoords}_maxC{maxCoords}.msh") + uw_filename = f".meshes/uw_sqbIB_minC{minCoords}_maxC{maxCoords}.msh" else: - uw_filename = (f".meshes/uw_usbIB_minC{minCoords}_maxC{maxCoords}.msh") + uw_filename = f".meshes/uw_usbIB_minC{minCoords}_maxC{maxCoords}.msh" else: uw_filename = filename - if uw.mpi.rank == 0: import gmsh + gmsh.initialize() 
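The corner points `p2`..`p9` constructed above for `RegionalSphericalBox` are one (radius, latitude, longitude) to Cartesian conversion repeated eight times. A compact equivalent, using a hypothetical helper rather than anything in the Underworld API:

```python
import numpy as np

def sph_to_cart(radius, lat_deg, lon_deg):
    lat, lon = np.radians(lat_deg), np.radians(lon_deg)
    return (radius * np.cos(lat) * np.cos(lon),
            radius * np.cos(lat) * np.sin(lon),
            radius * np.sin(lat))

# Defaults matching the signature above:
# radiusOuter=1.0, radiusInner=0.547, NEcorner=[+45, +45], SWcorner=[-45, -45]
corners = [sph_to_cart(r, lat, lon)
           for r in (1.0, 0.547)
           for lat in (45.0, -45.0)
           for lon in (45.0, -45.0)]
print(len(corners))  # the 8 corners p2..p9
```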
gmsh.option.setNumber("General.Verbosity", gmsh_verbosity) gmsh.model.add("Box") @@ -4079,8 +4079,8 @@ class boundary_normals_3D(Enum): if not simplex: cellSize = 0.0 - nx,ny = elementRes - ny_a,ny_b = zelementRes + nx, ny = elementRes + ny_a, ny_b = zelementRes p1 = gmsh.model.geo.add_point(xmin, ymin, 0.0, cellSize) p2 = gmsh.model.geo.add_point(xmax, ymin, 0.0, cellSize) @@ -4107,40 +4107,55 @@ class boundary_normals_3D(Enum): gmsh.model.geo.synchronize() # Add Physical groups for boundaries - gmsh.model.add_physical_group(1, [l1,], boundaries.Bottom.value) + gmsh.model.add_physical_group( + 1, + [ + l1, + ], + boundaries.Bottom.value, + ) gmsh.model.set_physical_name(1, l1, boundaries.Bottom.name) gmsh.model.add_physical_group(1, [l2], boundaries.Top.value) gmsh.model.set_physical_name(1, l2, boundaries.Top.name) gmsh.model.add_physical_group(1, [l3, l4], boundaries.Left.value) gmsh.model.set_physical_name(1, l34, boundaries.Left.name) - gmsh.model.add_physical_group(1, [l5,l6], boundaries.Right.value) + gmsh.model.add_physical_group(1, [l5, l6], boundaries.Right.value) gmsh.model.set_physical_name(1, l56, boundaries.Right.name) gmsh.model.add_physical_group(1, [l7], boundaries.Internal.value) gmsh.model.set_physical_name(1, l7, boundaries.Internal.name) - gmsh.model.addPhysicalGroup(2, [surface1,surface2], 99999) + gmsh.model.addPhysicalGroup(2, [surface1, surface2], 99999) gmsh.model.setPhysicalName(2, 99999, "Elements") if not simplex: gmsh.model.mesh.set_transfinite_curve( - tag=l1, numNodes=nx + 1, meshType="Progression", coef=1.0) + tag=l1, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l2, numNodes=nx + 1, meshType="Progression", coef=1.0) + tag=l2, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l3, numNodes=ny_b + 1, meshType="Progression", coef=1.0) + tag=l3, numNodes=ny_b + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l4, numNodes=ny_a + 1, meshType="Progression", coef=1.0) + tag=l4, numNodes=ny_a + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l5, numNodes=ny_b + 1, meshType="Progression", coef=1.0) + tag=l5, numNodes=ny_b + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l6, numNodes=ny_a + 1, meshType="Progression", coef=1.0) + tag=l6, numNodes=ny_a + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_curve( - tag=l7, numNodes=nx + 1, meshType="Progression", coef=1.0) + tag=l7, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) gmsh.model.mesh.set_transfinite_surface( - tag=surface1, arrangement="Left", cornerTags=[p1, p2, p5, p6]) + tag=surface1, arrangement="Left", cornerTags=[p1, p2, p5, p6] + ) gmsh.model.mesh.set_recombine(2, surface1) gmsh.model.mesh.set_transfinite_surface( - tag=surface2, arrangement="Left", cornerTags=[p5, p6, p3, p4]) + tag=surface2, arrangement="Left", cornerTags=[p5, p6, p3, p4] + ) gmsh.model.mesh.set_recombine(2, surface2) gmsh.model.mesh.generate(dim) @@ -4157,7 +4172,7 @@ class boundary_normals_3D(Enum): if not simplex: cellSize = 0.0 nx, ny, nz = elementRes - nzt,nzb = zelementRes + nzt, nzb = zelementRes p1t = gmsh.model.geo.add_point(xmin, ymin, zmax, cellSize) p2t = gmsh.model.geo.add_point(xmax, ymin, zmax, cellSize) @@ -4233,9 +4248,13 @@ class boundary_normals_3D(Enum): cl = gmsh.model.geo.add_curve_loop((l8b, l3i, -l7b, -l3b)) back_b = gmsh.model.geo.add_plane_surface([cl]) - sloop1 = 
gmsh.model.geo.add_surface_loop([front_t, right_t, back_t, top, left_t, internal]) + sloop1 = gmsh.model.geo.add_surface_loop( + [front_t, right_t, back_t, top, left_t, internal] + ) volume_t = gmsh.model.geo.add_volume([sloop1]) - sloop2 = gmsh.model.geo.add_surface_loop([front_b, right_b, back_b, internal, left_b, bottom]) + sloop2 = gmsh.model.geo.add_surface_loop( + [front_b, right_b, back_b, internal, left_b, bottom] + ) volume_b = gmsh.model.geo.add_volume([sloop2]) gmsh.model.geo.synchronize() @@ -4248,50 +4267,112 @@ class boundary_normals_3D(Enum): gmsh.model.set_physical_name(2, internal, boundaries.Internal.name) gmsh.model.add_physical_group(2, [left_t, left_b], boundaries.Left.value) gmsh.model.set_physical_name(2, left, boundaries.Left.name) - gmsh.model.add_physical_group(2, [right_t,right_b], boundaries.Right.value) + gmsh.model.add_physical_group(2, [right_t, right_b], boundaries.Right.value) gmsh.model.set_physical_name(2, right, boundaries.Right.name) gmsh.model.add_physical_group(2, [front_t, front_b], boundaries.Front.value) gmsh.model.set_physical_name(2, front, boundaries.Front.name) - gmsh.model.add_physical_group(2, [back_t,back_b], boundaries.Back.value) + gmsh.model.add_physical_group(2, [back_t, back_b], boundaries.Back.value) gmsh.model.set_physical_name(2, back, boundaries.Back.name) - gmsh.model.addPhysicalGroup(3, [volume_t,volume_b], 99999) + gmsh.model.addPhysicalGroup(3, [volume_t, volume_b], 99999) gmsh.model.setPhysicalName(3, 99999, "Elements") if not simplex: - gmsh.model.mesh.set_transfinite_curve(l1t, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l2t, numNodes=ny + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l3t, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l4t, numNodes=ny + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l1i, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l2i, numNodes=ny + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l3i, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l4i, numNodes=ny + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l1b, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l2b, numNodes=ny + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l3b, numNodes=nx + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l4b, numNodes=ny + 1, meshType="Progression", coef=1.0) - - gmsh.model.mesh.set_transfinite_curve(l5t, numNodes=nzt + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l6t, numNodes=nzt + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l7t, numNodes=nzt + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l8t, numNodes=nzt + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l5b, numNodes=nzb + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l6b, numNodes=nzb + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l7b, numNodes=nzb + 1, meshType="Progression", coef=1.0) - gmsh.model.mesh.set_transfinite_curve(l8b, numNodes=nzb + 1, meshType="Progression", coef=1.0) - - gmsh.model.mesh.set_transfinite_surface(tag=bottom, arrangement="Left", cornerTags=[p1b, p2b, 
p4b, p3b]) - gmsh.model.mesh.set_transfinite_surface(tag=top, arrangement="Left", cornerTags=[p1t, p2t, p4t, p3t]) - gmsh.model.mesh.set_transfinite_surface(tag=internal, arrangement="Left", cornerTags=[p1i, p2i, p4i, p3i]) - gmsh.model.mesh.set_transfinite_surface(tag=front_t, arrangement="Left", cornerTags=[p1i, p2i, p2t, p1t]) - gmsh.model.mesh.set_transfinite_surface(tag=back_t, arrangement="Left", cornerTags=[p3i, p4i, p4t, p3t]) - gmsh.model.mesh.set_transfinite_surface(tag=right_t, arrangement="Left", cornerTags=[p2i, p4i, p4t, p2t]) - gmsh.model.mesh.set_transfinite_surface(tag=left_t, arrangement="Left", cornerTags=[p1i, p3i, p3t, p1t]) - gmsh.model.mesh.set_transfinite_surface(tag=front_b, arrangement="Left", cornerTags=[p1b, p2b, p2i, p1i]) - gmsh.model.mesh.set_transfinite_surface(tag=back_b, arrangement="Left", cornerTags=[p3b, p4b, p4i, p3i]) - gmsh.model.mesh.set_transfinite_surface(tag=right_b, arrangement="Left", cornerTags=[p2b, p4b, p4i, p2i]) - gmsh.model.mesh.set_transfinite_surface(tag=left_b, arrangement="Left", cornerTags=[p1b, p3b, p3i, p1i]) + gmsh.model.mesh.set_transfinite_curve( + l1t, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l2t, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l3t, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l4t, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l1i, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l2i, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l3i, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l4i, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l1b, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l2b, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l3b, numNodes=nx + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l4b, numNodes=ny + 1, meshType="Progression", coef=1.0 + ) + + gmsh.model.mesh.set_transfinite_curve( + l5t, numNodes=nzt + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l6t, numNodes=nzt + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l7t, numNodes=nzt + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l8t, numNodes=nzt + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l5b, numNodes=nzb + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l6b, numNodes=nzb + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l7b, numNodes=nzb + 1, meshType="Progression", coef=1.0 + ) + gmsh.model.mesh.set_transfinite_curve( + l8b, numNodes=nzb + 1, meshType="Progression", coef=1.0 + ) + + gmsh.model.mesh.set_transfinite_surface( + tag=bottom, arrangement="Left", cornerTags=[p1b, p2b, p4b, p3b] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=top, arrangement="Left", cornerTags=[p1t, p2t, p4t, p3t] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=internal, arrangement="Left", cornerTags=[p1i, p2i, p4i, p3i] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=front_t, arrangement="Left", 
cornerTags=[p1i, p2i, p2t, p1t] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=back_t, arrangement="Left", cornerTags=[p3i, p4i, p4t, p3t] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=right_t, arrangement="Left", cornerTags=[p2i, p4i, p4t, p2t] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=left_t, arrangement="Left", cornerTags=[p1i, p3i, p3t, p1t] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=front_b, arrangement="Left", cornerTags=[p1b, p2b, p2i, p1i] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=back_b, arrangement="Left", cornerTags=[p3b, p4b, p4i, p3i] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=right_b, arrangement="Left", cornerTags=[p2b, p4b, p4i, p2i] + ) + gmsh.model.mesh.set_transfinite_surface( + tag=left_b, arrangement="Left", cornerTags=[p1b, p3b, p3i, p1i] + ) gmsh.model.mesh.set_recombine(2, bottom) gmsh.model.mesh.set_recombine(2, top) gmsh.model.mesh.set_recombine(2, internal) @@ -4304,8 +4385,12 @@ class boundary_normals_3D(Enum): gmsh.model.mesh.set_recombine(2, right_b) gmsh.model.mesh.set_recombine(2, left_b) - gmsh.model.mesh.set_transfinite_volume(volume_t, cornerTags=[p1i, p2i, p4i, p3i, p1t, p2t, p4t, p3t]) - gmsh.model.mesh.set_transfinite_volume(volume_b, cornerTags=[p1b, p2b, p4b, p3b, p1i, p2i, p4i, p3i]) + gmsh.model.mesh.set_transfinite_volume( + volume_t, cornerTags=[p1i, p2i, p4i, p3i, p1t, p2t, p4t, p3t] + ) + gmsh.model.mesh.set_transfinite_volume( + volume_b, cornerTags=[p1b, p2b, p4b, p3b, p1i, p2i, p4i, p3i] + ) gmsh.model.mesh.set_recombine(3, volume_t) gmsh.model.mesh.set_recombine(3, volume_b) @@ -4324,7 +4409,7 @@ def box_return_coords_to_bounds(coords): coords[x10s, :] = minCoords[1] coords[x11s, :] = maxCoords[1] - if dim ==3: + if dim == 3: x20s = coords[:, 1] < minCoords[2] x21s = coords[:, 1] > maxCoords[2] coords[x20s, :] = minCoords[2] @@ -4342,9 +4427,10 @@ def box_return_coords_to_bounds(coords): useMultipleTags=True, useRegions=False, markVertices=True, - refinement=0., + refinement=0.0, refinement_callback=None, return_coords_to_bounds=box_return_coords_to_bounds, - verbose=verbose,) + verbose=verbose, + ) uw.adaptivity._dm_unstack_bcs(new_mesh.dm, new_mesh.boundaries, "Face Sets") return new_mesh diff --git a/src/underworld3/swarm.py b/src/underworld3/swarm.py index f48a2631..7e7c5d19 100644 --- a/src/underworld3/swarm.py +++ b/src/underworld3/swarm.py @@ -19,10 +19,13 @@ from enum import Enum +# We can grab this type from the PETSc module class SwarmType(Enum): + DMSWARM_BASIC = 0 DMSWARM_PIC = 1 +# We can grab this type from the PETSc module class SwarmPICLayout(Enum): """ Particle population fill type: @@ -329,15 +332,14 @@ def _rbf_reduce_to_meshVar(self, meshVar, verbose=False): kd = uw.kdtree.KDTree(meshVar.coords) with self.swarm.access(): - #n, d, b = kd.find_closest_point(self.swarm.data) - d, n = kd.query(self.swarm.data, k=1) + d, n = kd.query(self.swarm.data, k=1) node_values = np.zeros((meshVar.coords.shape[0], self.num_components)) w = np.zeros(meshVar.coords.shape[0]) if not self._nn_proxy: for i in range(self.data.shape[0]): - #if b[i]: + # if b[i]: node_values[n[i], :] += self.data[i, :] / (1.0e-24 + d[i]) w[n[i]] += 1.0 / (1.0e-24 + d[i]) @@ -389,7 +391,7 @@ def rbf_interpolate(self, new_coords, verbose=False, nnn=None): D = self.data.copy() kdt = uw.kdtree.KDTree(self.swarm.particle_coordinates.data[:, :]) - #kdt.build_index() + # kdt.build_index() values = kdt.rbf_interpolator_local(new_coords, D, nnn, 2, verbose) @@ -518,11 +520,14 @@ def read_timestep( # check if 
swarmFilename exists if os.path.isfile(os.path.abspath(swarmFilename)): # easier to debug abs path + print(f"Reading swarm information from {swarmFilename}", flush=True) pass else: raise RuntimeError(f"{os.path.abspath(swarmFilename)} does not exist") if os.path.isfile(os.path.abspath(filename)): + print(f"Reading variable information from {filename}", flush=True) + pass else: raise RuntimeError(f"{os.path.abspath(filename)} does not exist") @@ -549,12 +554,12 @@ def read_timestep( all_coords = h5f_swarm["coordinates"][()] all_data = h5f_data["data"][()] - cell = self.swarm.mesh.get_closest_local_cells(all_coords) - local = np.where(cell >= 0)[0] - # not_not_local = np.where(cell == -1)[0] + # cell = self.swarm.mesh.get_closest_local_cells(all_coords) + # local = np.where(cell >= 0)[0] + # # not_not_local = np.where(cell == -1)[0] - local_coords = all_coords[local] - local_data = all_data[local] + local_coords = all_coords # [local] + local_data = all_data # [local] kdt = uw.kdtree.KDTree(local_coords) @@ -591,7 +596,7 @@ def __init__( self.radius_s = radius**2 self.update_type = update_type if self.update_type == 1: - self.nnn_bc = npoints_bc + self.nnn_bc = npoints_bc self.ind_bc = ind_bc # These are the things we require of the generic swarm variable type @@ -702,7 +707,7 @@ def _update(self): 2) for each index in the set, we create a mask mesh variable by mapping 1.0 wherever the index matches and 0.0 where it does not. - NOTE: If no material is identified with a given nodal value, the default is to impose + NOTE: If no material is identified with a given nodal value, the default is to impose a near-neighbour hunt for a valid material and set that one ## ToDo: This should be revisited to match the updated master copy of _update @@ -713,48 +718,54 @@ def _update(self): """ if self.update_type == 0: kd = uw.kdtree.KDTree(self._meshLevelSetVars[0].coords) - + with self.swarm.access(): - #n_indices, n_distance = kd.find_closest_n_points(self.nnn,self.swarm.particle_coordinates.data) - n_distance, n_indices = kd.query(self.swarm.particle_coordinates.data, k=self.nnn) + n_distance, n_indices = kd.query( + self.swarm.particle_coordinates.data, k=self.nnn + ) kd_swarm = uw.kdtree.KDTree(self.swarm.particle_coordinates.data) - #n, d, b = kd_swarm.find_closest_point(self._meshLevelSetVars[0].coords) - d, n = kd_swarm.query(self._meshLevelSetVars[0].coords, k=1) + # n, d, b = kd_swarm.find_closest_point(self._meshLevelSetVars[0].coords) + d, n = kd_swarm.query( + self._meshLevelSetVars[0].coords, k=1, sqr_dists=True + ) + for ii in range(self.indices): meshVar = self._meshLevelSetVars[ii] - + with self.swarm.mesh.access(meshVar), self.swarm.access(): node_values = np.zeros((meshVar.data.shape[0],)) w = np.zeros((meshVar.data.shape[0],)) - + for i in range(self.data.shape[0]): - tem = np.isclose(n_distance[i,:],n_distance[i,0]) - dist = n_distance[i,tem] - indices = n_indices[i,tem] - tem = dist 0.0)[0]] /= w[np.where(w > 0.0)[0]] - meshVar.data[:,0] = node_values[...] + meshVar.data[:, 0] = node_values[...]
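The `_update` branches here, like `_rbf_reduce_to_meshVar` earlier in this file, share one reduction pattern: each particle deposits its value on a nearby mesh node with an inverse-distance weight, and node values are then normalised by the accumulated weights. A minimal sketch of the nearest-node case, with scipy's `cKDTree` standing in for `uw.kdtree.KDTree`:

```python
import numpy as np
from scipy.spatial import cKDTree

def reduce_to_nodes(node_coords, particle_coords, particle_values, eps=1.0e-24):
    d, n = cKDTree(node_coords).query(particle_coords, k=1)
    values = np.zeros((node_coords.shape[0], particle_values.shape[1]))
    w = np.zeros(node_coords.shape[0])
    # Deposit each particle on its nearest node with weight 1/d ...
    np.add.at(values, n, particle_values / (eps + d)[:, None])
    np.add.at(w, n, 1.0 / (eps + d))
    # ... then normalise the nodes that received anything.
    hit = w > 0.0
    values[hit] /= w[hit][:, None]
    return values

nodes = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
parts = np.random.default_rng(1).random((20, 2))
print(reduce_to_nodes(nodes, parts, parts[:, :1].copy()))
```

Nodes that attract no particles keep a zero value, which is exactly the case the "near-neighbour hunt" below exists to patch up.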
- # if there is no material found, - # impose a near-neighbour hunt for a valid material and set that one + # if there is no material found, + # impose a near-neighbour hunt for a valid material and set that one ind_w0 = np.where(w == 0.0)[0] if len(ind_w0) > 0: - ind_ = np.where(self.data[n[ind_w0]]==ii)[0] + ind_ = np.where(self.data[n[ind_w0]] == ii)[0] if len(ind_) > 0: meshVar.data[ind_w0[ind_]] = 1.0 elif self.update_type == 1: with self.swarm.access(): kd = uw.kdtree.KDTree(self.swarm.particle_coordinates.data) - #n_indices, n_distance = kd.find_closest_n_points(self.nnn,self._meshLevelSetVars[0].coords) - n_distance, n_indices = kd.query(self._meshLevelSetVars[0].coords,k=self.nnn) + n_distance, n_indices = kd.query( + self._meshLevelSetVars[0].coords, k=self.nnn, sqr_dists=True + ) + for ii in range(self.indices): meshVar = self._meshLevelSetVars[ii] with self.swarm.mesh.access(meshVar), self.swarm.access(): @@ -762,37 +773,142 @@ def _update(self): w = np.zeros((meshVar.data.shape[0],)) for i in range(meshVar.data.shape[0]): if i not in self.ind_bc: - ind = np.where(n_distance[i,:] 0.0)[0]] /= w[np.where(w > 0.0)[0]] - meshVar.data[:,0] = node_values[...] + meshVar.data[:, 0] = node_values[...] - # if there is no material found, - # impose a near-neighbour hunt for a valid material and set that one + # if there is no material found, + # impose a near-neighbour hunt for a valid material and set that one ind_w0 = np.where(w == 0.0)[0] if len(ind_w0) > 0: - ind_ = np.where(self.data[n_indices[ind_w0]]==ii)[0] + ind_ = np.where(self.data[n_indices[ind_w0]] == ii)[0] if len(ind_) > 0: meshVar.data[ind_w0[ind_]] = 1.0 return -# @typechecked -class Swarm(Stateful, uw_object): +## This should be the basic swarm, and we can then create a sub-class that will +## be a PIC swarm + + +class PICSwarm(Stateful, uw_object): + """ + Particle swarm implementation with automatic mesh-particle interactions. + + The `PICSwarm` class is Underworld's primary particle management system, built on PETSc's + DMSWARM_PIC type. It provides automatic particle migration, mesh-particle connectivity, + and streamlined particle operations for Lagrangian particle tracking and data storage. + + Differences from UW Swarm: + - **Mesh Integration**: Built-in particle-in-cell (PIC) connectivity with automatic cell tracking + - **Migration**: Uses the standard PETSc strategy for migration which depends on the DM type. This requires + calculation of cell-relationships each time the coordinates are updated and particles that are not found will + be deleted. + + + Parameters + ---------- + mesh : uw.discretisation.Mesh + The mesh object that defines the computational domain. Particles will be + automatically associated with mesh cells for efficient spatial operations. + recycle_rate : int, optional + Rate at which particles are recycled for streak management. If > 1, enables + streak particle functionality where particles are duplicated and tracked + across multiple cycles. Default is 0 (no recycling). + verbose : bool, optional + Enable verbose output for debugging and monitoring particle operations. + Default is False. + + Attributes + ---------- + mesh : uw.discretisation.Mesh + Reference to the associated mesh object. + dim : int + Spatial dimension of the mesh (2D or 3D). + cdim : int + Coordinate dimension of the mesh. + data : numpy.ndarray + Direct access to particle coordinate data. + particle_coordinates : SwarmVariable + SwarmVariable containing particle coordinate information (auto-created).
+ particle_cellid : SwarmVariable + SwarmVariable containing particle cell ID information (auto-created). + recycle_rate : int + Current recycle rate for streak management. + cycle : int + Current cycle number for streak particles. + + Methods + ------- + populate_petsc(fill_param=1) + Populate swarm using PETSc's built-in particle generation. + populate(fill_param=1, layout=SwarmPICLayout.GAUSS) + Populate the swarm with particles using specified layout. + add_particles_with_coordinates(coords) + Add new particles at specified coordinate locations. + add_variable(name, size, dtype=float) + Add a new variable to track additional particle properties. + save(filename, meshUnits=1.0, swarmUnits=1.0, units="dimensionless") + Save swarm data to file. + read_timestep(filename, step_name, outputPath="./output/") + Read swarm data from a specific timestep file. + advection(V_fn, delta_t, evalf=False, corrector=True, restore_points_func=None) + Advect particles using a velocity field with automatic migration. + estimate_dt(V_fn, dt_min=1.0e-15, dt_max=1.0) + Estimate appropriate timestep for particle advection. + + Examples + -------- + Create a standard swarm with automatic features: + + >>> import underworld3 as uw + >>> mesh = uw.meshing.UnstructuredSimplexBox(minCoords=(0,0), maxCoords=(1,1)) + >>> swarm = uw.swarm.Swarm(mesh=mesh) + >>> swarm.populate(fill_param=2, layout=uw.swarm.SwarmPICLayout.GAUSS) + + Access automatic coordinate and cell ID fields: + + >>> coords = swarm.particle_coordinates.data + >>> cell_ids = swarm.particle_cellid.data + + Create a streak swarm with recycling: + + >>> streak_swarm = uw.swarm.Swarm(mesh=mesh, recycle_rate=5) + >>> streak_swarm.populate(fill_param=1) + + Add custom particle data and perform advection: + + >>> temperature = swarm.add_variable("temperature", 1) + >>> velocity_field = mesh.add_variable("velocity", mesh.dim) + >>> # ... set up velocity field ... + >>> swarm.advection(velocity_field.sym, delta_t=0.01) # Automatic migration + + Notes + ----- + - Particle migration occurs automatically during advection operations + - Coordinate and cell ID fields are created and managed automatically at the + PETSc level + + + """ + instances = 0 @timing.routine_timer_decorator @@ -888,31 +1004,6 @@ def __init__(self, mesh, recycle_rate=0, verbose=False): def mesh(self): return self._mesh - # The setter needs updating to account for re-distribution of the DM - # in the general case - see adaptivity.mesh2mesh_swarm() - - # @mesh.setter - # def mesh(self, new_mesh): - # self._mesh = new_mesh - # self.dm.setCellDM(new_mesh.dm) - - # # k-d tree indexing is no longer valid - # self._index = None - # self._nnmapdict = {} - - # cellid = self.dm.getField("DMSwarm_cellid") - # cellid[:] = 0 # new_mesh.get_closest_cells(coords).reshape(-1) - # self.dm.restoreField("DMSwarm_cellid") - # self.dm.migrate(remove_sent_points=True) - - # # Also need to re-proxy the swarm variables on the new mesh !! - # for v in self.vars: - # var = self.vars[v] - # var._create_proxy_variable() - # var._update() - - # return - @property def data(self): return self.particle_coordinates.data @@ -959,7 +1050,7 @@ def populate_petsc( (iii) DMSWARMPIC_LAYOUT_SUBDIVISION: 2D and 3D for quad/hex and 2D tri. So this means, simplex mesh in 3D only supports GAUSS - This is based - on the tensor product locations so it is not even in the cells. + on the tensor product locations so it is not uniform in the cells. 
""" if layout == None: @@ -980,7 +1071,7 @@ def populate_petsc( # self.dm.setLocalSizes((elend-elstart) * fill_param, 0) self.dm.insertPointUsingCellDM(self.layout.value, fill_param) - return # self # LM: Is there any reason to return self ? + return # @@ -1011,9 +1102,9 @@ def populate( if np.any(newp_cells0 > self.mesh._centroids.shape[0]): raise RuntimeError("Some new coordinates can't find a owning cell - Error") - #valid = newp_cells0 != -1 - #newp_coords = newp_coords0[valid] - #newp_cells = newp_cells0[valid] + # valid = newp_cells0 != -1 + # newp_coords = newp_coords0[valid] + # newp_cells = newp_cells0[valid] newp_coords = newp_coords0 newp_cells = newp_cells0 @@ -1080,13 +1171,6 @@ def populate( offset = swarm_orig_size * i self._remeshed.data[offset::, 0] = i - # Validate (eliminate if required) - - # cellid = self.dm.getField("DMSwarm_cellid") - # lost = np.where(cellid == -1) - # print(f"{uw.mpi.rank} - lost particles: {lost[0].shape} out of {cellid.shape}", flush=True) - # self.dm.restoreField("DMSwarm_cellid") - return @timing.routine_timer_decorator @@ -1680,8 +1764,8 @@ def _get_map(self, var): h.update(meshvar_coords) digest = h.intdigest() if digest not in self._nnmapdict: - #self._nnmapdict[digest] = self._index.find_closest_point(meshvar_coords)[0] - self._nnmapdict[digest] = self._index.query(meshvar_coords,k=1)[0] + # self._nnmapdict[digest] = self._index.find_closest_point(meshvar_coords)[0] + self._nnmapdict[digest] = self._index.query(meshvar_coords, k=1)[0] return self._nnmapdict[digest] @timing.routine_timer_decorator @@ -1754,6 +1838,8 @@ def advection( # del updated_current_coords # del v_at_Vpts + print(f"{ uw.mpi.rank}: Peace", flush=True) + # Wrap this whole thing in sub-stepping loop for step in range(0, substeps): @@ -1766,16 +1852,18 @@ def advection( with self.access(self.particle_coordinates): v_at_Vpts = np.zeros_like(self.particle_coordinates.data) - if evalf: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evalf( - V_fn_matrix[d], self.particle_coordinates.data - ).reshape(-1) - else: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evaluate( - V_fn_matrix[d], self.particle_coordinates.data - ).reshape(-1) + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.particle_coordinates.data + # ).reshape(-1) + # else: + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.particle_coordinates.data, + evalf=evalf, + ).reshape(-1) mid_pt_coords = ( self.particle_coordinates.data[...] 
@@ -1795,16 +1883,19 @@ def advection( v_at_Vpts = np.zeros_like(self.data) - if evalf: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evalf( - V_fn_matrix[d], self.particle_coordinates.data - ).reshape(-1) - else: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evaluate( - V_fn_matrix[d], self.particle_coordinates.data - ).reshape(-1) + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.particle_coordinates.data + # ).reshape(-1) + # else: + # + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.particle_coordinates.data, + evalf=evalf, + ).reshape(-1) # if (uw.mpi.rank == 0): # print("Re-launch from X0", flush=True) @@ -1825,16 +1916,18 @@ def advection( with self.access(self.particle_coordinates): v_at_Vpts = np.zeros_like(self.data) - if evalf: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evalf( - V_fn_matrix[d], self.data - ).reshape(-1) - else: - for d in range(self.dim): - v_at_Vpts[:, d] = uw.function.evaluate( - V_fn_matrix[d], self.data - ).reshape(-1) + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.data + # ).reshape(-1) + # else: + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.data, + evalf=evalf, + ).reshape(-1) new_coords = self.data + delta_t * v_at_Vpts / substeps @@ -1934,7 +2027,7 @@ def estimate_dt(self, V_fn): import math with self.access(): - vel = uw.function.evalf(V_fn, self.particle_coordinates.data) + vel = uw.function.evaluate(V_fn, self.particle_coordinates.data, evalf=True) try: magvel_squared = vel[:, 0] ** 2 + vel[:, 1] ** 2 if self.mesh.dim == 3: @@ -1961,7 +2054,7 @@ def estimate_dt(self, V_fn): return None -class NodalPointSwarm(Swarm): +class NodalPointPICSwarm(PICSwarm): r"""Swarm with particles located at the coordinate points of a meshVariable The swarmVariable `X0` is defined so that the particles can "snap back" to their original locations @@ -2078,3 +2171,1466 @@ def advection( ) return + + +## New - Basic Swarm (no PIC skillz) +## What is missing: +## - no celldm +## - PIC layouts of particles are not directly available / must be done by hand +## - No automatic migration - must compute ranks for the particle swarms +## - No automatic definition of coordinate fields (need to add by hand) + + +class Swarm(Stateful, uw_object): + """ + A basic particle swarm implementation for Lagrangian particle tracking and data storage. + + The UW `Swarm` class provides a simplified particle management system that uses + PETSc's DMSWARM_BASIC type. Unlike the standard `Swarm` class, this implementation + does not rely on PETSc to determine ranks for particle migration but instead uses + our own kdtree neighbour-domain computations. + + This class is preferred for most operations except where particle / cell relationships + are always required. + + Parameters + ---------- + mesh : uw.discretisation.Mesh + The mesh object that defines the computational domain for particle operations. + Particles will be associated with this mesh for spatial queries and operations. + recycle_rate : int, optional + Rate at which particles are recycled for streak management. If > 1, enables + streak particle functionality where particles are duplicated and tracked + across multiple cycles. Default is 0 (no recycling). + verbose : bool, optional + Enable verbose output for debugging and monitoring particle operations. + Default is False. 
+
+    Attributes
+    ----------
+    mesh : uw.discretisation.Mesh
+        Reference to the associated mesh object.
+    dim : int
+        Spatial dimension of the mesh (2D or 3D).
+    cdim : int
+        Coordinate dimension of the mesh.
+    data : numpy.ndarray
+        Direct access to particle coordinate data.
+    particle_coordinates : SwarmVariable
+        SwarmVariable containing particle coordinate information.
+    recycle_rate : int
+        Current recycle rate for streak management.
+    cycle : int
+        Current cycle number for streak particles.
+
+    Methods
+    -------
+    populate(fill_param=1)
+        Populate the swarm with particles throughout the domain.
+    migrate(remove_sent_points=True, delete_lost_points=True, max_its=10)
+        Manually migrate particles across MPI processes after coordinate updates.
+    add_particles_with_coordinates(coords)
+        Add new particles at specified coordinate locations.
+    add_particles_with_global_coordinates(coords)
+        Add particles using the global coordinate system.
+    add_variable(name, size, dtype=float)
+        Add a new variable to track additional particle properties.
+    save(filename, meshUnits=1.0, swarmUnits=1.0, units="dimensionless")
+        Save swarm data to file.
+    read_timestep(filename, step_name, outputPath="./output/")
+        Read swarm data from a specific timestep file.
+    advection(V_fn, delta_t, evalf=False, corrector=True, restore_points_func=None)
+        Advect particles using a velocity field.
+    estimate_dt(V_fn, dt_min=1.0e-15, dt_max=1.0)
+        Estimate an appropriate timestep for particle advection.
+
+    Examples
+    --------
+    Create a basic swarm and populate it with particles:
+
+    >>> import underworld3 as uw
+    >>> mesh = uw.meshing.UnstructuredSimplexBox(minCoords=(0,0), maxCoords=(1,1))
+    >>> swarm = uw.swarm.Swarm(mesh=mesh)
+    >>> swarm.populate(fill_param=2)
+
+    Create a streak swarm with recycling:
+
+    >>> streak_swarm = uw.swarm.Swarm(mesh=mesh, recycle_rate=5)
+    >>> streak_swarm.populate(fill_param=1)
+
+    Add custom particle data:
+
+    >>> temperature = swarm.add_variable("temperature", 1)
+    >>> velocity = swarm.add_variable("velocity", mesh.dim)
+
+    Notes
+    -----
+    Particle migration is still called automatically when the
+    `particle_coordinates` variable is updated within an `access` context;
+    `migrate` only needs to be called by hand after direct coordinate updates.
+
+    `swarm.populate` uses the mesh point locations of a discontinuous
+    interpolant to determine the particle locations.
+
+    """
+
+    instances = 0
+
+    @timing.routine_timer_decorator
+    def __init__(self, mesh, recycle_rate=0, verbose=False):
+        Swarm.instances += 1
+
+        self.verbose = verbose
+        self._mesh = mesh
+        self.dim = mesh.dim
+        self.cdim = mesh.cdim
+        self.dm = PETSc.DMSwarm().create()
+        self.dm.setDimension(self.dim)
+        self.dm.setType(SwarmType.DMSWARM_BASIC.value)
+        self._data = None
+
+        # Is the swarm a streak-swarm ?
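+        # (A "streak" swarm holds `recycle_rate` copies of its initial particle
+        #  set; the copies are advected and periodically snapped back to their
+        #  launch points, so the swarm traces short streak-lines rather than
+        #  drifting indefinitely. See the recycling step in `advection`.)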
+        self.recycle_rate = recycle_rate
+        self.cycle = 0
+
+        # dictionary for variables
+
+        # import weakref (not helpful as garbage collection does not remove the fields from the DM)
+        # self._vars = weakref.WeakValueDictionary()
+        self._vars = {}
+
+        # add variable to handle particle coords - match the name used by the PIC swarm for consistency
+        self._coord_var = SwarmVariable(
+            "DMSwarmPIC_coor",
+            self,
+            self.cdim,
+            dtype=float,
+            _register=True,
+            _proxy=False,
+            rebuild_on_cycle=False,
+        )
+
+        # add variable to hold swarm coordinates during position updates
+        self._X0 = uw.swarm.SwarmVariable(
+            "DMSwarm_X0",
+            self,
+            self.cdim,
+            dtype=float,
+            _register=True,
+            _proxy=False,
+            rebuild_on_cycle=False,
+        )
+
+        # This is for swarm streak management:
+        # add variable to hold swarm origins
+
+        if self.recycle_rate > 1:
+
+            self._remeshed = uw.swarm.SwarmVariable(
+                "DMSwarm_remeshed",
+                self,
+                1,
+                dtype=int,
+                _register=True,
+                _proxy=False,
+                rebuild_on_cycle=False,
+            )
+
+        self._X0_uninitialised = True
+        self._index = None
+        self._nnmapdict = {}
+
+        super().__init__()
+
+    @property
+    def mesh(self):
+        return self._mesh
+
+    @property
+    def data(self):
+        return self.particle_coordinates.data
+
+    @property
+    def particle_coordinates(self):
+        return self._coord_var
+
+    @timing.routine_timer_decorator
+    def populate(
+        self,
+        fill_param: Optional[int] = 1,
+    ):
+        """
+        Populate the swarm with particles throughout the domain.
+
+        Parameters
+        ----------
+        fill_param:
+            Parameter determining the particle count per cell (per dimension),
+            based on the point locations of a discontinuous basis of the mesh degree.
+        """
+
+        self.fill_param = fill_param
+
+        newp_coords0 = self.mesh._get_coords_for_basis(fill_param, continuous=False)
+        newp_cells0 = self.mesh.get_closest_local_cells(newp_coords0)
+
+        valid = newp_cells0 != -1
+        newp_coords = newp_coords0[valid]
+        newp_cells = newp_cells0[valid]
+
+        self.dm.finalizeFieldRegister()
+        self.dm.addNPoints(newp_coords.shape[0] + 1)
+
+        coords = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim))
+        coords[...] = newp_coords[...]
+        self.dm.restoreField("DMSwarmPIC_coor")
+
+        if self.recycle_rate > 1:
+            with self.access():
+                # This is a mesh-local quantity, so let's just
+                # store it on the mesh in an ad_hoc fashion for now
+
+                self.mesh.particle_X_orig = self.particle_coordinates.data.copy()
+
+            with self.access():
+                swarm_orig_size = self.particle_coordinates.data.shape[0]
+                all_local_coords = np.vstack(
+                    (self.particle_coordinates.data,) * (self.recycle_rate)
+                )
+
+            # cell ids for the replicated coordinates (mirrors all_local_coords;
+            # `all_local_cells` was previously undefined in this scope)
+            all_local_cells = np.hstack((newp_cells,) * (self.recycle_rate))
+
+            swarm_new_size = all_local_coords.data.shape[0]
+
+            self.dm.addNPoints(swarm_new_size - swarm_orig_size)
+
+            coords = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim))
+
+            coords[...] = (
+                all_local_coords[...]
+                + (0.33 / (1 + fill_param))
+                * (np.random.random(size=all_local_coords.shape) - 0.5)
+                * 0.00001
+                * self.mesh._search_lengths[all_local_cells].reshape(-1, 1)  # typical cell size
+            )
+
+            self.dm.restoreField("DMSwarmPIC_coor")
+
+            ## Now set the cycle values
+
+            with self.access(self._remeshed):
+                for i in range(0, self.recycle_rate):
+                    offset = swarm_orig_size * i
+                    self._remeshed.data[offset::, 0] = i
+
+        return
+
+    @timing.routine_timer_decorator
+    def migrate(
+        self,
+        remove_sent_points=True,
+        delete_lost_points=True,
+        max_its=10,
+    ):
+        """
+        Migrate the swarm across processes after coordinates have been updated.
+
+        The algorithm uses a global kD-tree built on the domain centroids to
+        decide each particle's MPI rank (send to the closest domain). If a
+        particle is mis-assigned to a rank, the next choice is the
+        second-closest domain, and so on.
+
+        A few particles may still be unclaimed after this redistribution,
+        which usually means they lie just outside the mesh. Any points that
+        remain lost are deleted if `delete_lost_points` is set.
+
+        Implementation note:
+        We retained (above) the name `DMSwarmPIC_coor` for the particle
+        coordinate field so that this routine can be inherited by a PIC swarm,
+        which has this field pre-defined. (We would also need to add a cellid
+        field and re-compute it upon landing.)
+        """
+
+        from time import time
+
+        time_c = time()
+        centroids = self.mesh._get_domain_centroids()
+        mesh_domain_kdtree = uw.kdtree.KDTree(centroids)
+
+        time0 = time()
+        time1 = time()
+
+        # This pass only considers particles that are not already claimed
+        # by the local domain.
+
+        swarm_coord_array = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim))
+        in_or_not = self.mesh.points_in_domain(swarm_coord_array)
+        self.dm.restoreField("DMSwarmPIC_coor")
+
+        num_points_in_domain = np.count_nonzero(in_or_not == True)
+        num_points_not_in_domain = np.count_nonzero(in_or_not == False)
+        not_my_points = np.where(in_or_not == False)[0]
+
+        uw.mpi.barrier()
+
+        global_unclaimed_points = int(
+            uw.utilities.gather_data(
+                num_points_not_in_domain, bcast=True, dtype=int
+            ).sum()
+        )
+
+        global_claimed_points = int(
+            uw.utilities.gather_data(num_points_in_domain, bcast=True, dtype=int).sum()
+        )
+
+        # Unlikely, but we should check this
+        uw.mpi.barrier()
+        if global_unclaimed_points == 0:
+            return
+
+        # Migrate particles between processors if appropriate.
+        # Otherwise skip the next step and just remove missing points
+        # and tidy up.
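+        # Reassignment rule used in the loop below (illustrative):
+        #
+        #   dist, rank = mesh_domain_kdtree.query(coords[not_my_points], k=it + 1)
+        #   owner = rank.reshape(-1, it + 1)[:, it]   # (it+1)-th closest domain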
+ + if uw.mpi.size > 1: + for it in range(0, min(max_its, uw.mpi.size)): + + # Send unclaimed points to next processor in line + + swarm_rank_array = self.dm.getField("DMSwarm_rank") + swarm_coord_array = self.dm.getField("DMSwarmPIC_coor").reshape( + (-1, self.dim) + ) + + if len(swarm_coord_array > 0): + dist, rank = mesh_domain_kdtree.query( + swarm_coord_array[not_my_points], + k=it + 1, + ) + + swarm_rank_array[not_my_points, 0] = rank.reshape(-1, it + 1)[:, it] + + self.dm.restoreField("DMSwarmPIC_coor") + self.dm.restoreField("DMSwarm_rank") + + # Now we send the points (basic migration) + self.dm.migrate(remove_sent_points=True) + uw.mpi.barrier() + + swarm_coord_array = self.dm.getField("DMSwarmPIC_coor").reshape( + (-1, self.dim) + ) + + in_or_not = self.mesh.points_in_domain(swarm_coord_array) + self.dm.restoreField("DMSwarmPIC_coor") + + num_points_in_domain = np.count_nonzero(in_or_not == True) + num_points_not_in_domain = np.count_nonzero(in_or_not == False) + not_my_points = np.where(in_or_not == False)[0] + + unclaimed_points_last_iteration = global_unclaimed_points + claimed_points_last_iteration = global_claimed_points + + global_unclaimed_points = int( + uw.utilities.gather_data( + num_points_not_in_domain, + bcast=True, + dtype=int, + ).sum() + ) + + global_claimed_points = int( + uw.utilities.gather_data( + num_points_in_domain, bcast=True, dtype=int + ).sum() + ) + + if ( + global_unclaimed_points == unclaimed_points_last_iteration + and global_claimed_points == claimed_points_last_iteration + ): + break + + # Missing points for deletion if required + if delete_lost_points: + + # print( + # f"{uw.mpi.rank} - Delete {len(not_my_points)} from swarm size {self.dm.getLocalSize()}", + # flush=True, + # ) + + uw.mpi.barrier() + if len(not_my_points > 0): + indices = np.sort(not_my_points)[::-1] + for index in indices: + self.dm.removePointAtIndex(index) + + # print( + # f"{uw.mpi.rank} - final swarm size {self.dm.getLocalSize()}", + # flush=True, + # ) + + return + + @timing.routine_timer_decorator + def add_particles_with_coordinates(self, coordinatesArray) -> int: + """ + Add particles to the swarm using particle coordinates provided + using a numpy array. + + Note that particles with coordinates NOT local to the current processor will + be rejected / ignored. + + Either include an array with all coordinates to all processors + or an array with the local coordinates. + + Parameters + ---------- + coordinatesArray : numpy.ndarray + The numpy array containing the coordinate of the new particles. Array is + expected to take shape n*dim, where n is the number of new particles, and + dim is the dimensionality of the swarm's supporting mesh. + + Returns + -------- + npoints: int + The number of points added to the local section of the swarm. 
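+
+        Example
+        -------
+        A sketch (two 2D points, assumed to lie in the local domain):
+
+        >>> pts = np.array([[0.1, 0.2], [0.5, 0.5]])
+        >>> n_added = swarm.add_particles_with_coordinates(pts)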
+ """ + + if not isinstance(coordinatesArray, np.ndarray): + raise TypeError("'coordinateArray' must be provided as a numpy array") + if not len(coordinatesArray.shape) == 2: + raise ValueError("The 'coordinateArray' is expected to be two dimensional.") + if not coordinatesArray.shape[1] == self.mesh.dim: + #### petsc appears to ignore columns that are greater than the mesh dim, but still worth including + raise ValueError( + """The 'coordinateArray' must have shape n*dim, where 'n' is the + number of particles to add, and 'dim' is the dimensionality of + the supporting mesh ({}).""".format( + self.mesh.dim + ) + ) + + valid = self.mesh.points_in_domain(coordinatesArray, strict_validation=True) + valid_coordinates = coordinatesArray[valid] + npoints = len(valid_coordinates) + swarm_size = self.dm.getLocalSize() + + # -1 means no particles have been added yet + if swarm_size == -1: + swarm_size = 0 + npoints = npoints + 1 + + self.dm.finalizeFieldRegister() + self.dm.addNPoints(npoints=npoints) + + coords = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim)) + coords[swarm_size::, :] = valid_coordinates[:, :] + self.dm.restoreField("DMSwarmPIC_coor") + + # Here we update the swarm cycle values as required + + if self.recycle_rate > 1: + with self.access(self._remeshed): + # self._Xorig.data[...] = coordinatesArray + self._remeshed.data[...] = 0 + + self.dm.migrate(remove_sent_points=True) + + return npoints + + @timing.routine_timer_decorator + def add_particles_with_global_coordinates( + self, globalCoordinatesArray, migrate=True + ) -> int: + """ + Add particles to the swarm using particle coordinates provided + using a numpy array. + + global coordinates: particles will be appropriately migrated + + Parameters + ---------- + globalCoordinatesArray : numpy.ndarray + The numpy array containing the coordinate of the new particles. Array is + expected to take shape n*dim, where n is the number of new particles, and + dim is the dimensionality of the swarm's supporting mesh. + + Returns + -------- + npoints: int + The number of points added to the local section of the swarm. + """ + + if not isinstance(globalCoordinatesArray, np.ndarray): + raise TypeError("'coordinateArray' must be provided as a numpy array") + if not len(globalCoordinatesArray.shape) == 2: + raise ValueError("The 'coordinateArray' is expected to be two dimensional.") + if not globalCoordinatesArray.shape[1] == self.mesh.dim: + #### petsc appears to ignore columns that are greater than the mesh dim, but still worth including + raise ValueError( + """The 'coordinateArray' must have shape n*dim, where 'n' is the + number of particles to add, and 'dim' is the dimensionality of + the supporting mesh ({}).""".format( + self.mesh.dim + ) + ) + + npoints = len(globalCoordinatesArray) + swarm_size = self.dm.getLocalSize() + + # -1 means no particles have been added yet + if swarm_size == -1: + swarm_size = 0 + npoints = npoints + 1 + + self.dm.finalizeFieldRegister() + self.dm.addNPoints(npoints=npoints) + + coords = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim)) + coords[swarm_size::, :] = globalCoordinatesArray[:, :] + self.dm.restoreField("DMSwarmPIC_coor") + + # Here we update the swarm cycle values as required + + if self.recycle_rate > 1: + with self.access(self._remeshed): + # self._Xorig.data[...] = globalCoordinatesArray + self._remeshed.data[...] 
= 0 + + if migrate: + self.migrate(remove_sent_points=True) + + return npoints + + @timing.routine_timer_decorator + def save( + self, + filename: int, + compression: Optional[bool] = False, + compressionType: Optional[str] = "gzip", + force_sequential=False, + ): + """ + + Save the swarm coordinates to a h5 file. + + Parameters + ---------- + filename : + The filename of the swarm checkpoint file to save to disk. + compression : + Add compression to the h5 files (saves space but increases write times with increasing no. of processors) + compressionType : + Type of compression to use, 'gzip' and 'lzf' supported. 'gzip' is default. Compression also needs to be set to 'True'. + + + + """ + if h5py.h5.get_config().mpi == False and comm.size > 1 and comm.rank == 0: + warnings.warn( + "Collective IO not possible as h5py not available in parallel mode. Switching to sequential. This will be slow for models running on multiple processors", + stacklevel=2, + ) + if filename.endswith(".h5") == False: + raise RuntimeError("The filename must end with .h5") + if compression == True and comm.rank == 0: + warnings.warn("Compression may slow down write times", stacklevel=2) + + if h5py.h5.get_config().mpi == True and not force_sequential: + # It seems to be a bad idea to mix mpi barriers with the access + # context manager so the copy-free version of this seems to hang + # when there are many active cores. This is probably why the parallel + # h5py write hangs + + with self.access(): + data_copy = self.data[:].copy() + + with h5py.File(f"{filename[:-3]}.h5", "w", driver="mpio", comm=comm) as h5f: + if compression == True: + h5f.create_dataset( + "coordinates", + data=data_copy[:], + compression=compressionType, + ) + else: + h5f.create_dataset("coordinates", data=data_copy[:]) + + del data_copy + + else: + # It seems to be a bad idea to mix mpi barriers with the access + # context manager so the copy-free version of this seems to hang + # when there are many active cores + + with self.access(): + data_copy = self.data[:].copy() + + if comm.rank == 0: + with h5py.File(f"{filename[:-3]}.h5", "w") as h5f: + if compression == True: + h5f.create_dataset( + "coordinates", + data=data_copy, + chunks=True, + maxshape=(None, data_copy.shape[1]), + compression=compressionType, + ) + else: + h5f.create_dataset( + "coordinates", + data=data_copy, + chunks=True, + maxshape=(None, data_copy.shape[1]), + ) + + comm.barrier() + for i in range(1, comm.size): + if comm.rank == i: + with h5py.File(f"{filename[:-3]}.h5", "a") as h5f: + h5f["coordinates"].resize( + (h5f["coordinates"].shape[0] + data_copy.shape[0]), + axis=0, + ) + # passive swarm, zero local particles is not unusual + if data_copy.shape[0] > 0: + h5f["coordinates"][-data_copy.shape[0] :] = data_copy[:] + comm.barrier() + comm.barrier() + + del data_copy + + return + + @timing.routine_timer_decorator + def read_timestep( + self, + base_filename: str, + swarm_id: str, + index: int, + outputPath: Optional[str] = "", + ): + output_base_name = os.path.join(outputPath, base_filename) + swarm_file = output_base_name + f".{swarm_id}.{index:05}.h5" + + ### open up file with coords on all procs + with h5py.File(f"{swarm_file}", "r") as h5f: + coordinates = h5f["coordinates"][:] + + #### utilises the UW function for adding a swarm by an array + self.add_particles_with_coordinates(coordinates) + + return + + @timing.routine_timer_decorator + def add_variable( + self, + name, + size=1, + dtype=float, + proxy_degree=2, + _nn_proxy=False, + ): + return SwarmVariable( + 
name, + self, + size, + dtype=dtype, + proxy_degree=proxy_degree, + _nn_proxy=_nn_proxy, + ) + + @timing.routine_timer_decorator + def petsc_save_checkpoint( + self, + swarmName: str, + index: int, + outputPath: Optional[str] = "", + ): + """ + + Use PETSc to save the swarm and attached data to a .pbin and xdmf file. + + Parameters + ---------- + swarmName : + Name of the swarm to save. + index : + An index which might correspond to the timestep or output number (for example). + outputPath : + Path to save the data. If left empty it will save the data in the current working directory. + """ + + x_swarm_fname = f"{outputPath}{swarmName}_{index:05d}.xmf" + self.dm.viewXDMF(x_swarm_fname) + + @timing.routine_timer_decorator + def write_timestep( + self, + filename: str, + swarmname: str, + index: int, + swarmVars: Optional[list] = None, + outputPath: Optional[str] = "", + time: Optional[int] = None, + compression: Optional[bool] = False, + compressionType: Optional[str] = "gzip", + force_sequential: Optional[bool] = False, + ): + """ + + Save data to h5 and a corresponding xdmf for visualisation using h5py. + + Parameters + ---------- + swarmName : + Name of the swarm to save. + swarmVars : + List of swarm objects to save. + index : + An index which might correspond to the timestep or output number (for example). + outputPath : + Path to save the data. If left empty it will save the data in the current working directory. + time : + Attach the time to the generated xdmf. + compression : + Whether to compress the h5 files [bool]. + compressionType : + The type of compression to use. 'gzip' and 'lzf' are the supported types, with 'gzip' as the default. + """ + + # This will eliminate the issue of whether or not to put path separators in the + # outputPath. Also does the right thing if outputPath is "" + + output_base_name = os.path.join(outputPath, filename) + "." + swarmname + + # check the directory where we will write checkpoint + dir_path = os.path.dirname(output_base_name) # get directory + + # check if path exists + if os.path.exists(os.path.abspath(dir_path)): # easier to debug abs + pass + else: + raise RuntimeError(f"{os.path.abspath(dir_path)} does not exist") + + # check if we have write access + if os.access(os.path.abspath(dir_path), os.W_OK): + pass + else: + raise RuntimeError(f"No write access to {os.path.abspath(dir_path)}") + + # could also try to coerce this to be a list and raise if it fails (tuple, singleton ... ) + # also ... 
why the typechecking if this can still happen + + if swarmVars is not None and not isinstance(swarmVars, list): + raise RuntimeError("`swarmVars` does not appear to be a list.") + + else: + ### save the swarm particle location + self.save( + filename=f"{output_base_name}.{index:05d}.h5", + compression=compression, + compressionType=compressionType, + force_sequential=force_sequential, + ) + + #### Generate a h5 file for each field + if swarmVars != None: + for field in swarmVars: + field.save( + filename=f"{output_base_name}.{field.name}.{index:05d}.h5", + compression=compression, + compressionType=compressionType, + force_sequential=force_sequential, + ) + + if uw.mpi.rank == 0: + ### only need to combine the h5 files to a single xdmf on one proc + with open(f"{output_base_name}.{index:05d}.xdmf", "w") as xdmf: + # Write the XDMF header + xdmf.write('\n') + xdmf.write( + '\n' + ) + xdmf.write("\n") + xdmf.write( + f'\n' + ) + + if time != None: + xdmf.write(f' \n") + xdmf.write("\n") + xdmf.write("\n") + + @property + def vars(self): + return self._vars + + def access(self, *writeable_vars: SwarmVariable): + """ + This context manager makes the underlying swarm variables data available to + the user. The data should be accessed via the variables `data` handle. + + As default, all data is read-only. To enable writeable data, the user should + specify which variable they wish to modify. + + At the conclusion of the users context managed block, numerous further operations + will be automatically executed. This includes swarm parallel migration routines + where the swarm's `particle_coordinates` variable has been modified. The swarm + variable proxy mesh variables will also be updated for modifed swarm variables. + + Parameters + ---------- + writeable_vars + The variables for which data write access is required. + + Example + ------- + + >>> import underworld3 as uw + >>> someMesh = uw.discretisation.FeMesh_Cartesian() + >>> with someMesh.deform_mesh(): + ... someMesh.data[0] = [0.1,0.1] + >>> someMesh.data[0] + array([ 0.1, 0.1]) + """ + import time + + uw.timing._incrementDepth() + stime = time.time() + + deaccess_list = [] + for var in self._vars.values(): + # if already accessed within higher level context manager, continue. + if var._is_accessed == True: + continue + # set flag so variable status can be known elsewhere + var._is_accessed = True + # add to de-access list to rewind this later + deaccess_list.append(var) + # grab numpy object, setting read only if necessary + var._data = self.dm.getField(var.clean_name).reshape( + (-1, var.num_components) + ) + assert var._data is not None + if var not in writeable_vars: + var._old_data_flag = var._data.flags.writeable + var._data.flags.writeable = False + else: + # increment variable state + var._increment() + + # make view for each var component + if var._proxy: + for i in range(0, var.shape[0]): + for j in range(0, var.shape[1]): + var._data_container[i, j] = var._data_container[i, j]._replace( + data=var.data[:, var._data_layout(i, j)], + ) + + # if particles moving, update swarm state + if self.particle_coordinates in writeable_vars: + self._increment() + + # Create a class which specifies the required context + # manager hooks (`__enter__`, `__exit__`). + class exit_manager: + def __init__(self, swarm): + self.em_swarm = swarm + + def __enter__(self): + + pass + + def __exit__(self, *args): + + for var in self.em_swarm.vars.values(): + # only de-access variables we have set access for. 
+ if var not in deaccess_list: + continue + # set this back, although possibly not required. + if var not in writeable_vars: + var._data.flags.writeable = var._old_data_flag + var._data = None + self.em_swarm.dm.restoreField(var.clean_name) + var._is_accessed = False + # do particle migration if coords changes + + if self.em_swarm.particle_coordinates in writeable_vars: + # let's use the mesh index to update the particles owning cells. + # note that the `petsc4py` interface is more convenient here as the + # `SwarmVariable.data` interface is controlled by the context manager + # that we are currently within, and it is therefore too easy to + # get things wrong that way. + # + + if uw.mpi.size > 1: + + coords = self.em_swarm.dm.getField("DMSwarmPIC_coor").reshape( + (-1, self.em_swarm.dim) + ) + + self.em_swarm.dm.restoreField("DMSwarmPIC_coor") + + ## We'll need to identify the new processes here and update the particle rank value accordingly + self.em_swarm.migrate(remove_sent_points=True) + + # void these things too + self.em_swarm._index = None + self.em_swarm._nnmapdict = {} + + # do var updates + for var in self.em_swarm.vars.values(): + # if swarm migrated, update all. + # if var updated, update var. + if (self.em_swarm.particle_coordinates in writeable_vars) or ( + var in writeable_vars + ): + var._update() + + if var._proxy: + for i in range(0, var.shape[0]): + for j in range(0, var.shape[1]): + # var._data_ij[i, j] = None + var._data_container[i, j] = var._data_container[ + i, j + ]._replace( + data=f"SwarmVariable[...].data is only available within mesh.access() context", + ) + + uw.timing._decrementDepth() + uw.timing.log_result(time.time() - stime, "Swarm.access", 1) + + return exit_manager(self) + + ## Better to have one master copy - this one is cut'n'pasted from + ## the MeshVariable class + + def _data_layout(self, i, j=None): + # mapping + + if self.vtype == uw.VarType.SCALAR: + return 0 + if self.vtype == uw.VarType.VECTOR: + if j is None: + return i + elif i == 0: + return j + else: + raise IndexError( + f"Vectors have shape {self.mesh.dim} or {(1, self.mesh.dim)} " + ) + if self.vtype == uw.VarType.TENSOR: + if self.mesh.dim == 2: + return ((0, 1), (2, 3))[i][j] + else: + return ((0, 1, 2), (3, 4, 5), (6, 7, 8))[i][j] + + if self.vtype == uw.VarType.SYM_TENSOR: + if self.mesh.dim == 2: + return ((0, 2), (2, 1))[i][j] + else: + return ((0, 3, 4), (3, 1, 5), (4, 5, 2))[i][j] + + if self.vtype == uw.VarType.MATRIX: + return i + j * self.shape[0] + + ## Check this - the interface to kdtree has changed, are we picking the correct field ? + @timing.routine_timer_decorator + def _get_map(self, var): + # generate tree if not avaiable + if not self._index: + with self.access(): + self._index = uw.kdtree.KDTree(self.data) + + # get or generate map + meshvar_coords = var._meshVar.coords + # we can't use numpy arrays directly as keys in python dicts, so + # we'll use `xxhash` to generate a hash of array. + # this shouldn't be an issue performance wise but we should test to be + # sufficiently confident of this. 
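+        # Cache shape (illustrative): {xxh64(meshvar_coords) -> nearest-particle
+        # indices}; the cache is discarded whenever self._index is invalidated
+        # (e.g. after particle migration).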
+ import xxhash + + h = xxhash.xxh64() + h.update(meshvar_coords) + digest = h.intdigest() + if digest not in self._nnmapdict: + self._nnmapdict[digest] = self._index.query(meshvar_coords, k=1)[1] + return self._nnmapdict[digest] + + @timing.routine_timer_decorator + def advection( + self, + V_fn, + delta_t, + order=2, + corrector=False, + restore_points_to_domain_func=None, + evalf=False, + step_limit=False, + ): + + dt_limit = self.estimate_dt(V_fn) + + if step_limit and dt_limit is not None: + substeps = int(max(1, round(abs(delta_t) / dt_limit))) + else: + substeps = 1 + + if True or uw.mpi.rank == 0 and self.verbose: + print(f"Substepping {substeps} / {abs(delta_t) / dt_limit}, {delta_t} ") + + # X0 holds the particle location at the start of advection + # This is needed because the particles may be migrated off-proc + # during timestepping. + + X0 = self._X0 + + V_fn_matrix = self.mesh.vector.to_matrix(V_fn) + + # Use current velocity to estimate where the particles would have + # landed in an implicit step. WE CANT DO THIS WITH SUB-STEPPING unless + # We have a lot more information about the previous launch point / timestep + # Also: how does this interact with the particle restoration function ? + + # if corrector == True and not self._X0_uninitialised: + # with self.access(self.particle_coordinates): + # v_at_Vpts = np.zeros_like(self.data) + + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.data + # ).reshape(-1) + # else: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evaluate( + # V_fn_matrix[d], self.data + # ).reshape(-1) + + # corrected_position = X0.data.copy() + delta_t * v_at_Vpts + # if restore_points_to_domain_func is not None: + # corrected_position = restore_points_to_domain_func( + # corrected_position + # ) + + # updated_current_coords = 0.5 * (corrected_position + self.data.copy()) + + # # validate_coords to ensure they live within the domain (or there will be trouble) + + # if restore_points_to_domain_func is not None: + # updated_current_coords = restore_points_to_domain_func( + # updated_current_coords + # ) + + # self.data[...] = updated_current_coords[...] + + # del updated_current_coords + # del v_at_Vpts + + # Wrap this whole thing in sub-stepping loop + for step in range(0, substeps): + + with self.access(X0): + X0.data[...] = self.particle_coordinates.data[...] + + # Mid point algorithm (2nd order) + + if order == 2: + with self.access(self.particle_coordinates): + v_at_Vpts = np.zeros_like(self.particle_coordinates.data) + + ## + ## Here we should check for particles which are interpolated and + ## those which can only be extrapolated. For the former, evalf is + ## not needed but for the latter it is essential + ## + + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.particle_coordinates.data + # ).reshape(-1) + # else: + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.particle_coordinates.data, + evalf=evalf, + ).reshape(-1) + + mid_pt_coords = ( + self.particle_coordinates.data[...] + + 0.5 * delta_t * v_at_Vpts / substeps + ) + + # validate_coords to ensure they live within the domain (or there will be trouble) + + if restore_points_to_domain_func is not None: + mid_pt_coords = restore_points_to_domain_func(mid_pt_coords) + + self.particle_coordinates.data[...] = mid_pt_coords[...] 
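+                    # NB: writing to particle_coordinates inside `access` marks
+                    # the swarm as moved; migration runs when the block exits.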
+ + del mid_pt_coords + + ## Let the swarm be updated, and then move the rest of the way + + v_at_Vpts = np.zeros_like(self.data) + + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.particle_coordinates.data + # ).reshape(-1) + # else: + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.particle_coordinates.data, + evalf=evalf, + ).reshape(-1) + + # if (uw.mpi.rank == 0): + # print("Re-launch from X0", flush=True) + + new_coords = X0.data[...] + delta_t * v_at_Vpts / substeps + + # validate_coords to ensure they live within the domain (or there will be trouble) + if restore_points_to_domain_func is not None: + new_coords = restore_points_to_domain_func(new_coords) + + self.particle_coordinates.data[...] = new_coords[...] + + del new_coords + del v_at_Vpts + + # forward Euler (1st order) + else: + + with self.access(self.particle_coordinates): + v_at_Vpts = np.zeros_like(self.data) + + # if evalf: + # for d in range(self.dim): + # v_at_Vpts[:, d] = uw.function.evalf( + # V_fn_matrix[d], self.data + # ).reshape(-1) + # else: + for d in range(self.dim): + v_at_Vpts[:, d] = uw.function.evaluate( + V_fn_matrix[d], + self.data, + evalf=evalf, + ).reshape(-1) + + new_coords = self.data + delta_t * v_at_Vpts / substeps + + # validate_coords to ensure they live within the domain (or there will be trouble) + + if restore_points_to_domain_func is not None: + new_coords = restore_points_to_domain_func(new_coords) + + self.data[...] = new_coords[...].copy() + + ## End of substepping loop + + ## Cycling of the swarm is a cheap and cheerful version of population control for particles. It turns the + ## swarm into a streak-swarm where particles are Lagrangian for a number of steps and then reset to their + ## original location. + + if self.recycle_rate > 1: + # Restore particles which have cycle == cycle rate (use >= just in case) + + # Remove remesh points and recreate a new set at the mesh-local + # locations that we already have stored. + + with self.access(self.particle_coordinates, self._remeshed): + remeshed = self._remeshed.data[:, 0] == 0 + # This is one way to do it ... 
we can do this better though + self.data[remeshed, 0] = 1.0e100 + + swarm_size = self.dm.getLocalSize() + + num_remeshed_points = self.mesh.particle_X_orig.shape[0] + + self.dm.addNPoints(num_remeshed_points) + + ## cellid = self.dm.getField("DMSwarm_cellid") + coords = self.dm.getField("DMSwarmPIC_coor").reshape((-1, self.dim)) + rmsh = self.dm.getField("DMSwarm_remeshed") + + # print(f"cellid -> {cellid.shape}") + # print(f"particle coords -> {coords.shape}") + # print(f"remeshed points -> {num_remeshed_points}") + + perturbation = 0.00001 * ( + (0.33 / (1 + self.fill_param)) + * (np.random.random(size=(num_remeshed_points, self.dim)) - 0.5) + * self.mesh._radii[cellid[swarm_size::]].reshape(-1, 1) + ) + + coords[swarm_size::] = self.mesh.particle_X_orig[:, :] + perturbation + ## cellid[swarm_size::] = self.mesh.particle_CellID_orig[:, 0] + rmsh[swarm_size::] = 0 + + # self.dm.restoreField("DMSwarm_cellid") + self.dm.restoreField("DMSwarmPIC_coor") + self.dm.restoreField("DMSwarm_remeshed") + + # when we let this go, the particles may be re-distributed to + # other processors, and we will need to rebuild the remeshed + # array before trying to compute / assign values to variables + + for swarmVar in self.vars.values(): + if swarmVar._rebuild_on_cycle: + with self.access(swarmVar): + if swarmVar.dtype is int: + nnn = 1 + else: + nnn = self.mesh.dim + 1 # 3 for triangles, 4 for tets ... + + interpolated_values = ( + swarmVar.rbf_interpolate(self.mesh.particle_X_orig, nnn=nnn) + # swarmVar._meshVar.fn, self.mesh.particle_X_orig + # ) + ).astype(swarmVar.dtype) + + swarmVar.data[swarm_size::] = interpolated_values + + ## + ## Determine RANK + ## + + # Migrate will already have been called by the access manager. + # Maybe we should hash the local particle coords to make this + # a little more user-friendly + + # self.dm.migrate(remove_sent_points=True) + + with self.access(self._remeshed): + self._remeshed.data[...] = np.mod( + self._remeshed.data[...] - 1, self.recycle_rate + ) + + self.cycle += 1 + + ## End of cycle_swarm loop + + return + + @timing.routine_timer_decorator + def estimate_dt(self, V_fn): + """ + Calculates an appropriate advective timestep for the given + mesh and velocity configuration. + """ + # we'll want to do this on an element by element basis + # for more general mesh + + # first let's extract a max global velocity magnitude + import math + + with self.access(): + vel = uw.function.evaluate(V_fn, self.particle_coordinates.data, evalf=True) + try: + magvel_squared = vel[:, 0] ** 2 + vel[:, 1] ** 2 + if self.mesh.dim == 3: + magvel_squared += vel[:, 2] ** 2 + + max_magvel = math.sqrt(magvel_squared.max()) + + except (ValueError, IndexError): + max_magvel = 0.0 + + from mpi4py import MPI + + max_magvel_glob = comm.allreduce(max_magvel, op=MPI.MAX) + + min_dx = self.mesh.get_min_radius() + + # The assumption should be that we cross one or two elements (2-4 radii), not more, + # in a single step (order 2, means one element per half-step or something + # that we can broadly interpret that way) + + if max_magvel_glob != 0.0: + return min_dx / max_magvel_glob + else: + return None + + +class NodalPointUWSwarm(Swarm): + r"""BASIC_Swarm with particles located at the coordinate points of a meshVariable + + The swarmVariable `X0` is defined so that the particles can "snap back" to their original locations + after they have been moved. 
+ + The purpose of this Swarm is to manage sample points for advection schemes based on upstream sampling + (method of characteristics etc)""" + + def __init__( + self, + trackedVariable: uw.discretisation.MeshVariable, + verbose=False, + ): + self.trackedVariable = trackedVariable + self.swarmVariable = None + + mesh = trackedVariable.mesh + + # Set up a standard swarm + super().__init__(mesh, verbose) + + nswarm = self + + meshVar_name = trackedVariable.clean_name + meshVar_symbol = trackedVariable.symbol + + ks = str(self.instance_number) + name = f"{meshVar_name}_star" + symbol = rf"{{ {meshVar_symbol} }}^{{ <*> }}" + + self.swarmVariable = uw.swarm.SwarmVariable( + name, + nswarm, + vtype=trackedVariable.vtype, + _proxy=False, + # proxy_degree=trackedVariable.degree, + # proxy_continuous=trackedVariable.continuous, + varsymbol=symbol, + ) + + # The launch point location + name = f"ns_X0_{ks}" + symbol = r"X0^{*^{{[" + ks + "]}}}" + nX0 = uw.swarm.SwarmVariable(name, nswarm, nswarm.dim, _proxy=False) + + # The launch point index + name = f"ns_I_{ks}" + symbol = r"I^{*^{{[" + ks + "]}}}" + nI0 = uw.swarm.SwarmVariable(name, nswarm, 1, dtype=int, _proxy=False) + + # The launch point processor rank + name = f"ns_R0_{ks}" + symbol = r"R0^{*^{{[" + ks + "]}}}" + nR0 = uw.swarm.SwarmVariable(name, nswarm, 1, dtype=int, _proxy=False) + + nswarm.dm.finalizeFieldRegister() + nswarm.dm.addNPoints( + trackedVariable.coords.shape[0] + 1 + ) # why + 1 ? That's the number of spots actually allocated + + # cellid = nswarm.dm.getField("DMSwarm_cellid") + coords = nswarm.dm.getField("DMSwarmPIC_coor").reshape((-1, nswarm.dim)) + coords[...] = trackedVariable.coords[...] + + cellid = self.mesh.get_closest_cells( + coords, + ) + + # Move slightly within the chosen cell to avoid edge effects + centroid_coords = self.mesh._centroids[cellid] + + shift = 0.01 + coords[:, :] = (1.0 - shift) * coords[:, :] + shift * centroid_coords[:, :] + + nswarm.dm.restoreField("DMSwarmPIC_coor") + # nswarm.dm.restoreField("DMSwarm_cellid") + + nswarm.dm.migrate(remove_sent_points=True) + + with nswarm.access(nX0, nI0): + nX0.data[:, :] = coords + nI0.data[:, 0] = range(0, coords.shape[0]) + + self._nswarm = nswarm + self._nX0 = nX0 + self._nI0 = nI0 + self._nR0 = nR0 + + return + + @timing.routine_timer_decorator + def advection( + self, + V_fn, + delta_t, + order=2, + corrector=False, + restore_points_to_domain_func=None, + evalf=False, + step_limit=True, + ): + + with self.access(self._X0): + self._X0.data[...] = self._nX0.data[...] + + with self.access(self._nR0): + self._nR0.data[...] = uw.mpi.rank + + super().advection( + V_fn, + delta_t, + order, + corrector, + restore_points_to_domain_func, + evalf, + step_limit, + ) + + return + + +## New - Basic Swarm (no PIC skillz) +## What is missing: +## - no celldm +## - PIC layouts of particles are not directly available / must be done by hand +## - No automatic migration - must compute ranks for the particle swarms +## - No automatic definition of coordinate fields (need to add by hand) diff --git a/src/underworld3/systems/ddt.py b/src/underworld3/systems/ddt.py index c41c4869..0005697f 100644 --- a/src/underworld3/systems/ddt.py +++ b/src/underworld3/systems/ddt.py @@ -7,7 +7,6 @@ import underworld3 as uw from underworld3 import VarType -# from underworld3.swarm import NodalPointSwarm, Swarm import underworld3.timing as timing from underworld3.utilities._api_tools import uw_object @@ -18,6 +17,7 @@ # class Eulerian(uw_object): # etc etc... 
+ class Symbolic(uw_object): r""" Symbolic History Manager: @@ -60,11 +60,11 @@ def __init__( psi_fn = sympy.Matrix([[psi_fn]]) self._psi_fn = psi_fn # stored with its native shape self._shape = psi_fn.shape # capture the shape - + # Set the display symbol for psi_fn and for the history variable. - self._psi_fn_symbol = varsymbol # e.g. "\psi" - self._psi_star_symbol = varsymbol + r"^\ast" # e.g. "\psi^\ast" - + self._psi_fn_symbol = varsymbol # e.g. "\psi" + self._psi_star_symbol = varsymbol + r"^\ast" # e.g. "\psi^\ast" + # Create the history list: each element is a Matrix of shape _shape. self.psi_star = [sympy.zeros(*self._shape) for _ in range(order)] self.initiate_history_fn() @@ -88,11 +88,14 @@ def psi_fn(self, new_fn): def _object_viewer(self): from IPython.display import Latex, display + # Display the primary variable display(Latex(rf"$\quad {self._psi_fn_symbol} = {sympy.latex(self._psi_fn)}$")) # Display the history variable using the different symbol. history_latex = ", ".join([sympy.latex(elem) for elem in self.psi_star]) - display(Latex(rf"$\quad {self._psi_star_symbol} = \left[{history_latex}\right]$")) + display( + Latex(rf"$\quad {self._psi_star_symbol} = \left[{history_latex}\right]$") + ) def update_history_fn(self): # Update the first history element with a copy of the current ψ. @@ -129,7 +132,6 @@ def update_post_solve( if verbose: print(f"Updating history for ψ = {self.psi_fn}", flush=True) - # Shift history: copy each element down the chain. for i in range(self.order - 1, 0, -1): self.psi_star[i] = self.psi_star[i - 1].copy() @@ -138,20 +140,25 @@ def update_post_solve( def bdf(self, order: Optional[int] = None): r"""Compute the backward differentiation approximation of the time-derivative of ψ. - For order 1: bdf ≡ ψ - psi_star[0] + For order 1: bdf ≡ ψ - psi_star[0] """ if order is None: order = self.order else: order = max(1, min(self.order, order)) - + with sympy.core.evaluate(False): if order == 1: bdf0 = self.psi_fn - self.psi_star[0] elif order == 2: - bdf0 = (3 * self.psi_fn / 2 - 2 * self.psi_star[0] + self.psi_star[1] / 2) + bdf0 = 3 * self.psi_fn / 2 - 2 * self.psi_star[0] + self.psi_star[1] / 2 elif order == 3: - bdf0 = (11 * self.psi_fn / 6 - 3 * self.psi_star[0] + (3 * self.psi_star[1]) / 2 - self.psi_star[2] / 3) + bdf0 = ( + 11 * self.psi_fn / 6 + - 3 * self.psi_star[0] + + (3 * self.psi_star[1]) / 2 + - self.psi_star[2] / 3 + ) return bdf0 def adams_moulton_flux(self, order: Optional[int] = None): @@ -159,16 +166,22 @@ def adams_moulton_flux(self, order: Optional[int] = None): order = self.order else: order = max(1, min(self.order, order)) - + with sympy.core.evaluate(False): if order == 1: am = self.theta * self.psi_fn + (1.0 - self.theta) * self.psi_star[0] elif order == 2: am = (5 * self.psi_fn + 8 * self.psi_star[0] - self.psi_star[1]) / 12 elif order == 3: - am = (9 * self.psi_fn + 19 * self.psi_star[0] - 5 * self.psi_star[1] + self.psi_star[2]) / 24 + am = ( + 9 * self.psi_fn + + 19 * self.psi_star[0] + - 5 * self.psi_star[1] + + self.psi_star[2] + ) / 24 return am + class Eulerian(uw_object): r"""Eulerian (mesh based) History Manager: This manages the update of a variable, $\psi$ on the mesh across timesteps. 
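For reference, the stencils coded in `bdf()` and `adams_moulton_flux()` above are the standard backward-differentiation and Adams-Moulton weights; writing $\psi^{*}_{k}$ for the $k$-th stored history value:

```latex
% BDF approximations of \Delta t \, \partial\psi/\partial t (orders 1-3), as in bdf():
\mathrm{bdf}_1 = \psi - \psi^{*}_{0}
\qquad
\mathrm{bdf}_2 = \tfrac{3}{2}\psi - 2\psi^{*}_{0} + \tfrac{1}{2}\psi^{*}_{1}
\qquad
\mathrm{bdf}_3 = \tfrac{11}{6}\psi - 3\psi^{*}_{0} + \tfrac{3}{2}\psi^{*}_{1} - \tfrac{1}{3}\psi^{*}_{2}

% Adams-Moulton flux averages (orders 1-3), as in adams_moulton_flux():
\mathrm{am}_1 = \theta\,\psi + (1-\theta)\,\psi^{*}_{0}
\qquad
\mathrm{am}_2 = \tfrac{1}{12}\bigl(5\,\psi + 8\,\psi^{*}_{0} - \psi^{*}_{1}\bigr)
\qquad
\mathrm{am}_3 = \tfrac{1}{24}\bigl(9\,\psi + 19\,\psi^{*}_{0} - 5\,\psi^{*}_{1} + \psi^{*}_{2}\bigr)
```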
@@ -218,10 +231,10 @@ def __init__( # meshVariable (storage) if isinstance(psi_fn, uw.discretisation._MeshVariable): - self._psi_fn = psi_fn.sym ### get symbolic form of the meshvariable + self._psi_fn = psi_fn.sym ### get symbolic form of the meshvariable self._psi_meshVar = psi_fn else: - self._psi_fn = psi_fn ### already in symbolic form + self._psi_fn = psi_fn ### already in symbolic form self._psi_meshVar = None self.order = order @@ -229,7 +242,6 @@ def __init__( psi_star = [] self.psi_star = psi_star - for i in range(order): self.psi_star.append( uw.discretisation.MeshVariable( @@ -245,7 +257,6 @@ def __init__( # print('initiating history fn', flush=True) ### Initiate first history value in chain self.initiate_history_fn() - return @@ -274,8 +285,7 @@ def _object_viewer(self): # ) display(Latex(rf"$\quad$History steps = {self.order}")) - - def _setup_projections(self): + def _setup_projections(self): ### using this to store terms that can't be evaluated (e.g. derivatives) # The projection operator for mapping derivative values to the mesh - needs to be different for each variable type, unfortunately ... if self.vtype == uw.VarType.SCALAR: @@ -306,7 +316,7 @@ def _setup_projections(self): self._psi_star_projection_solver.uw_function = self.psi_fn self._psi_star_projection_solver.bcs = self.bcs self._psi_star_projection_solver.smoothing = self.smoothing - + def update_history_fn(self): ### update first value in history chain ### avoids projecting if function can be evaluated @@ -317,13 +327,19 @@ def update_history_fn(self): self.psi_star[0].data[...] = self._psi_meshVar.data[...] # print('copying data', flush=True) except: - if self.evalf: - self.psi_star[0].data[...] = uw.function.evalf(self.psi_fn, self.psi_star[0].coords).reshape(-1, max(self.psi_fn.shape)) - # print('evalf data', flush=True) - else: - self.psi_star[0].data[...] = uw.function.evaluate(self.psi_fn, self.psi_star[0].coords).reshape(-1, max(self.psi_fn.shape)) - # print('evaluate data', flush=True) - + # if self.evalf: + # self.psi_star[0].data[...] = uw.function.evalf( + # self.psi_fn, self.psi_star[0].coords + # ).reshape(-1, max(self.psi_fn.shape)) + # # print('evalf data', flush=True) + # else: + self.psi_star[0].data[...] = uw.function.evaluate( + self.psi_fn, + self.psi_star[0].coords, + evalf=evalf, + ).reshape(-1, max(self.psi_fn.shape)) + # print('evaluate data', flush=True) + except: self._setup_projections() self._psi_star_projection_solver.solve() @@ -338,7 +354,7 @@ def initiate_history_fn(self): self.psi_star[i].data[...] = self.psi_star[0].data[...] return - + def update( self, evalf: Optional[bool] = False, @@ -367,12 +383,11 @@ def update_post_solve( if verbose and uw.mpi.rank == 0: print(f"Update {self.psi_fn}", flush=True) - ### copy values down the chain for i in range(self.order - 1, 0, -1): with self.mesh.access(self.psi_star[i]): - self.psi_star[i].data[...] = self.psi_star[i-1].data[...] - + self.psi_star[i].data[...] = self.psi_star[i - 1].data[...] 
+ ### update the history fn self.update_history_fn() # ### update the first value in the chain @@ -539,7 +554,7 @@ def __init__( ) # We just need one swarm since this is inherently a sequential operation - nswarm = uw.swarm.NodalPointSwarm(self._workVar, verbose) + nswarm = uw.swarm.NodalPointUWSwarm(self._workVar, verbose) self._nswarm_psi = nswarm # The projection operator for mapping swarm values to the mesh - needs to be different for @@ -661,8 +676,8 @@ def update_pre_solve( order=1, corrector=False, restore_points_to_domain_func=self.mesh.return_coords_to_bounds, - evalf=False, - step_limit=True, + evalf=evalf, + step_limit=False, #! substepping: this seems to be too diffusive if left on. #! Check the code carefully ! ) @@ -673,30 +688,31 @@ def update_pre_solve( # is required instead. try: - # Note, we should honour the evalf flag here too. with self._workVar.mesh.access(): self.psi_star[0].data[...] = uw.function.evaluate( - self.psi_fn, self.psi_star[0].coords + self.psi_fn, + self.psi_star[0].coords, + evalf=evalf, ) except: self._psi_star_projection_solver.uw_function = self.psi_fn self._psi_star_projection_solver.smoothing = 0.0 self._psi_star_projection_solver.solve(verbose=verbose) - if evalf: - with self._nswarm_psi.access(self._nswarm_psi.swarmVariable): - for d in range(self.psi_star[i].shape[1]): - self._nswarm_psi.swarmVariable.data[:, d] = uw.function.evalf( - self.psi_star[i].sym[d], self._nswarm_psi.data - ) - else: - with self._nswarm_psi.access(self._nswarm_psi.swarmVariable): - for d in range(self.psi_star[i].shape[1]): - self._nswarm_psi.swarmVariable.data[ - :, d - ] = uw.function.evaluate( - self.psi_star[i].sym[d], self._nswarm_psi.data - ) + # if evalf: + # with self._nswarm_psi.access(self._nswarm_psi.swarmVariable): + # for d in range(self.psi_star[i].shape[1]): + # self._nswarm_psi.swarmVariable.data[:, d] = uw.function.evalf( + # self.psi_star[i].sym[d], self._nswarm_psi.data + # ) + # else: + with self._nswarm_psi.access(self._nswarm_psi.swarmVariable): + for d in range(self.psi_star[i].shape[1]): + self._nswarm_psi.swarmVariable.data[:, d] = uw.function.evaluate( + self.psi_star[i].sym[d], + self._nswarm_psi.data, + evalf=evalf, + ) if self.preserve_moments and self._workVar.num_components == 1: @@ -749,9 +765,9 @@ def update_pre_solve( orig_index = self._nswarm_psi._nI0.data.copy().reshape(-1) with self.mesh.access(self._workVar): - self._workVar.data[ - orig_index, : - ] = self._nswarm_psi.swarmVariable.data[:, :] + self._workVar.data[orig_index, :] = ( + self._nswarm_psi.swarmVariable.data[:, :] + ) # Project / Copy from advected swarm to semi-Lagrangian variables. @@ -780,9 +796,6 @@ def update_pre_solve( self.I.fn = (self.psi_star[i].sym[0] - Imean) ** 2 IL2 = np.sqrt(self.I.evaluate()) - # if uw.mpi.rank == 0: - # print(f"Pre adjustment: {Imean}, {IL2}", flush=True) - with self.mesh.access(self.psi_star[i]): self.psi_star[i].data[...] 
+= Imean0 - Imean @@ -904,7 +917,7 @@ def __init__( super().__init__() # create a new swarm to manage here - dudt_swarm = uw.swarm.Swarm(mesh) + dudt_swarm = uw.swarm.UWSwarm(mesh) self.mesh = mesh self.swarm = dudt_swarm @@ -990,25 +1003,27 @@ def update_post_solve( # Now update the swarm variable - if evalf: - psi_star_0 = self.psi_star[0] - with self.swarm.access(psi_star_0): - for i in range(psi_star_0.shape[0]): - for j in range(psi_star_0.shape[1]): - updated_psi = uw.function.evalf( - self.psi_fn[i, j], self.swarm.data - ) - psi_star_0[i, j].data[:] = updated_psi + # if evalf: + # psi_star_0 = self.psi_star[0] + # with self.swarm.access(psi_star_0): + # for i in range(psi_star_0.shape[0]): + # for j in range(psi_star_0.shape[1]): + # updated_psi = uw.function.evalf( + # self.psi_fn[i, j], self.swarm.data + # ) + # psi_star_0[i, j].data[:] = updated_psi - else: - psi_star_0 = self.psi_star[0] - with self.swarm.access(psi_star_0): - for i in range(psi_star_0.shape[0]): - for j in range(psi_star_0.shape[1]): - updated_psi = uw.function.evaluate( - self.psi_fn[i, j], self.swarm.data - ) - psi_star_0[i, j].data[:] = updated_psi + # else: + psi_star_0 = self.psi_star[0] + with self.swarm.access(psi_star_0): + for i in range(psi_star_0.shape[0]): + for j in range(psi_star_0.shape[1]): + updated_psi = uw.function.evaluate( + self.psi_fn[i, j], + self.swarm.data, + evalf=evalf, + ) + psi_star_0[i, j].data[:] = updated_psi # Now update the swarm locations @@ -1194,28 +1209,31 @@ def update_post_solve( phi = 1 / self.step_averaging # Now update the swarm variable - if evalf: - psi_star_0 = self.psi_star[0] - with self.swarm.access(psi_star_0): - for i in range(psi_star_0.shape[0]): - for j in range(psi_star_0.shape[1]): - updated_psi = uw.function.evalf( - self.psi_fn[i, j], self.swarm.data - ) - psi_star_0[i, j].data[:] = ( - phi * updated_psi + (1 - phi) * psi_star_0[i, j].data[:] - ) - else: - psi_star_0 = self.psi_star[0] - with self.swarm.access(psi_star_0): - for i in range(psi_star_0.shape[0]): - for j in range(psi_star_0.shape[1]): - updated_psi = uw.function.evaluate( - self.psi_fn[i, j], self.swarm.data - ) - psi_star_0[i, j].data[:] = ( - phi * updated_psi + (1 - phi) * psi_star_0[i, j].data[:] - ) + # if evalf: + # psi_star_0 = self.psi_star[0] + # with self.swarm.access(psi_star_0): + # for i in range(psi_star_0.shape[0]): + # for j in range(psi_star_0.shape[1]): + # updated_psi = uw.function.evalf( + # self.psi_fn[i, j], self.swarm.data + # ) + # psi_star_0[i, j].data[:] = ( + # phi * updated_psi + (1 - phi) * psi_star_0[i, j].data[:] + # ) + # else: + # + psi_star_0 = self.psi_star[0] + with self.swarm.access(psi_star_0): + for i in range(psi_star_0.shape[0]): + for j in range(psi_star_0.shape[1]): + updated_psi = uw.function.evaluate( + self.psi_fn[i, j], + self.swarm.data, + evalf=evalf, + ) + psi_star_0[i, j].data[:] = ( + phi * updated_psi + (1 - phi) * psi_star_0[i, j].data[:] + ) return diff --git a/src/underworld3/systems/solvers.py b/src/underworld3/systems/solvers.py index 22ab3004..15b10ed9 100644 --- a/src/underworld3/systems/solvers.py +++ b/src/underworld3/systems/solvers.py @@ -21,20 +21,6 @@ from .ddt import Symbolic as Symbolic_DDt -# class UW_Scalar_Temple(SNES_Scalar): - -# class UW_Lagrangian_Helper: -# """Mixin style ... 
add some functions to manage swarm updates etc""" - -# @property -# def phi_star(self): -# return "phi_star" - -# @property -# def phi_star_star(self): -# return "phi_star_star" - - class SNES_Poisson(SNES_Scalar): r""" This class provides functionality for a discrete representation @@ -1456,6 +1442,7 @@ def solve( zero_init_guess: bool = True, timestep: float = None, _force_setup: bool = False, + _evalf=False, verbose=False, ): """ @@ -1485,19 +1472,20 @@ def solve( # Update History / Flux History terms # SemiLagrange and Lagrange may have different sequencing. - self.DuDt.update_pre_solve(timestep, verbose=verbose) - self.DFDt.update_pre_solve(timestep, verbose=verbose) + + self.DuDt.update_pre_solve(timestep, verbose=verbose, evalf=_evalf) + self.DFDt.update_pre_solve(timestep, verbose=verbose, evalf=_evalf) super().solve(zero_init_guess, _force_setup) - self.DuDt.update_post_solve(timestep, verbose=verbose) - self.DFDt.update_post_solve(timestep, verbose=verbose) + self.DuDt.update_post_solve(timestep, verbose=verbose, evalf=_evalf) + self.DFDt.update_post_solve(timestep, verbose=verbose, evalf=_evalf) self.is_setup = True self.constitutive_model._solver_is_setup = True return - + class SNES_Diffusion(SNES_Scalar): r""" @@ -1510,7 +1498,7 @@ class SNES_Diffusion(SNES_Scalar): \color{Maroon}{\underbrace{\Bigl[ f \Bigl] }_{\mathbf{f}}} $$ - The term $\mathbf{F}$ relates diffusive fluxes to gradients in the unknown $u$. + The term $\mathbf{F}$ relates diffusive fluxes to gradients in the unknown $u$. ## Properties @@ -1543,7 +1531,7 @@ def __init__( mesh: uw.discretisation.Mesh, u_Field: uw.discretisation.MeshVariable, order: int = 1, - theta: float = 0., + theta: float = 0.0, evalf: Optional[bool] = False, verbose=False, DuDt: Union[Eulerian_DDt, SemiLagrangian_DDt, Lagrangian_DDt] = None, @@ -1608,12 +1596,11 @@ def __init__( if DFDt is None: self.Unknowns.DFDt = Symbolic_DDt( - sympy.Matrix( - [[0] * self.mesh.dim] ), - varsymbol=rf"{{F[ {self.u.symbol} ] }}", - theta=theta, - bcs=None, - order=order, + sympy.Matrix([[0] * self.mesh.dim]), + varsymbol=rf"{{F[ {self.u.symbol} ] }}", + theta=theta, + bcs=None, + order=order, ) ### solution unable to solve after n timesteps, due to the projection of flux term (???) 
# self.Unknowns.DFDt = Eulerian_DDt( @@ -1640,7 +1627,7 @@ def F0(self): f0 = uw.function.expression( r"f_0 \left( \mathbf{u} \right)", - -self.f + sympy.simplify( self.DuDt.bdf() ) / self.delta_t, + -self.f + sympy.simplify(self.DuDt.bdf()) / self.delta_t, "Diffusion pointwise force term: f_0(u)", ) @@ -1672,7 +1659,6 @@ def f(self, value): self.is_setup = False self._f = sympy.Matrix((value,)) - @property def delta_t(self): return self._delta_t @@ -1696,9 +1682,7 @@ def estimate_dt(self): """ if isinstance(self.constitutive_model.diffusivity.sym, sympy.Expr): - if uw.function.fn_is_constant_expr( - self.constitutive_model.diffusivity.sym - ): + if uw.function.fn_is_constant_expr(self.constitutive_model.diffusivity.sym): max_diffusivity = uw.function.evaluate( self.constitutive_model.diffusivity.sym, np.zeros((1, self.mesh.dim)), @@ -1729,7 +1713,6 @@ def estimate_dt(self): ## estimate dt of adv and diff components self.dt_diff = 0.0 - dt_diff = (min_dx**2) / diffusivity_glob self.dt_diff = dt_diff @@ -1768,8 +1751,6 @@ def solve( # self._flux = self.constitutive_model.flux.T # self._flux_star = self._flux.copy() - - if not self.is_setup: self._setup_pointwise_functions(verbose) self._setup_discretisation(verbose) diff --git a/src/underworld3/utilities/geometry_tools.py b/src/underworld3/utilities/geometry_tools.py index 4e60fbcf..68d533dc 100644 --- a/src/underworld3/utilities/geometry_tools.py +++ b/src/underworld3/utilities/geometry_tools.py @@ -4,12 +4,81 @@ # TODO: Add the 2D version of this: distance to a line segment. # TODO: Signed distance to plane (NaN if outside prism) -## Distance from triangle to cloud of points +## Note: Distance from triangle to cloud of points ## Follows the open source embree (ray-shading) code (assumed fast). This is the numpy / ## python version of their code. 
## See the diagram here for how the spatial division works:
## https://stackoverflow.com/questions/2924795/fastest-way-to-compute-point-to-triangle-distance-in-3d


+def points_in_simplex2D(p, a, b, c):
+    """
+    p - numpy array of points in 2D
+    a, b, c - triangle points (numpy 1x2 arrays)
+
+    returns:
+    numpy array of truth values, one per point in p (True if the point lies in the triangle)
+    """
+
+    v0 = (c - a).reshape(1, 2)
+    v1 = (b - a).reshape(1, 2)
+    v2 = (p - a).reshape(-1, 2)
+
+    # Compute dot products
+    def dot(v1, v2):
+        d = v1[:, 0] * v2[:, 0] + v1[:, 1] * v2[:, 1]
+        return d
+
+    dot00 = dot(v0, v0)
+    dot01 = dot(v0, v1)
+    dot02 = dot(v0, v2)
+    dot11 = dot(v1, v1)
+    dot12 = dot(v1, v2)
+
+    # Compute barycentric coordinates
+    invDenom = 1 / (dot00 * dot11 - dot01 * dot01)
+    u = (dot11 * dot02 - dot01 * dot12) * invDenom
+    v = (dot00 * dot12 - dot01 * dot02) * invDenom
+    w = 1 - u - v
+
+    return np.logical_and((u >= 0), np.logical_and((v >= 0), (w >= 0)))
+
+
+def points_in_simplex3D(p, a, b, c, d):
+    """
+    p - numpy array of points in 3D
+    a, b, c, d - tetrahedron vertices (numpy 1x3 arrays)
+
+    returns:
+    numpy array of truth values, one per point in p (True if the point lies in the tetrahedron)
+    """
+
+    vap = (p - a).reshape(-1, 3)
+    vbp = (p - b).reshape(-1, 3)
+
+    vab = (b - a).reshape(-1, 3)
+    vac = (c - a).reshape(-1, 3)
+    vad = (d - a).reshape(-1, 3)
+
+    vbc = (c - b).reshape(-1, 3)
+    vbd = (d - b).reshape(-1, 3)
+
+    # ScTP computes the scalar triple product a.(bxc):
+
+    def ScTP(a, b, c):
+        c0 = b[:, 1] * c[:, 2] - b[:, 2] * c[:, 1]
+        c1 = b[:, 2] * c[:, 0] - b[:, 0] * c[:, 2]
+        c2 = b[:, 0] * c[:, 1] - b[:, 1] * c[:, 0]
+        return a[:, 0] * c0 + a[:, 1] * c1 + a[:, 2] * c2
+
+    va6 = ScTP(vbp, vbd, vbc)
+    vb6 = ScTP(vap, vac, vad)
+    vc6 = ScTP(vap, vad, vab)
+    vd6 = ScTP(vap, vab, vac)
+    v6 = 1 / ScTP(vab, vac, vad)
+
+    u = va6 * v6
+    v = vb6 * v6
+    w = vc6 * v6
+    t = vd6 * v6
+
+    return np.logical_and(
+        np.logical_and((u >= 0), (t >= 0)), np.logical_and((v >= 0), (w >= 0))
+    )
+
+
 def distance_pointcloud_triangle(p, a, b, c):
     """
     p - numpy array of points in 3D
diff --git a/src/underworld3/visualisation.py b/src/underworld3/visualisation.py
index 921b21f6..e08c566c 100644
--- a/src/underworld3/visualisation.py
+++ b/src/underworld3/visualisation.py
@@ -4,7 +4,6 @@
 
 
 def initialise(jupyter_backend):
-
     import pyvista as pv
 
     pv.global_theme.background = "white"
@@ -27,7 +26,7 @@ def initialise(jupyter_backend):
     return
 
 
-def mesh_to_pv_mesh(mesh, jupyter_backend=None):
+def mesh_to_pv_mesh(mesh0, jupyter_backend=None):
     """Initialise pyvista engine from existing mesh"""
 
     # # Required in notebooks
@@ -40,18 +39,57 @@
     import shutil
     import tempfile
     import pyvista as pv
+    import numpy as np
+
+    # with tempfile.TemporaryDirectory() as tmp:
+    #     if type(mesh) == str:  # reading msh file directly
+    #         vtk_filename = os.path.join(tmp, "tmpMsh.msh")
+    #         shutil.copyfile(mesh, vtk_filename)
+    #     else:  # reading mesh by creating vtk
+    #         vtk_filename = os.path.join(tmp, "tmpMsh.vtk")
+    #         mesh.vtk(vtk_filename)
+
+    #     pvmesh = pv.read(vtk_filename)
+
+    #     return pvmesh
+
+    ## Alternative - not via file / create an unstructured grid in pyvista
+
+    from petsc4py import PETSc
+
+    match (mesh0.dm.isSimplex(), mesh0.dim):
+        case (True, 2):
+            vtk_cell_type = pv.cell.CellType.TRIANGLE
+        case (True, 3):
+            vtk_cell_type = pv.cell.CellType.TETRA
+        case (False, 2):
+            vtk_cell_type = pv.cell.CellType.QUAD
+        case (False, 3):
+            vtk_cell_type = pv.cell.CellType.HEXAHEDRON
+
+    cStart, cEnd = mesh0.dm.getHeightStratum(0)
+    fStart, fEnd = mesh0.dm.getHeightStratum(1)
+    pStart, pEnd = mesh0.dm.getDepthStratum(0)
+
+    cell_num_points = mesh0.element.entities[mesh0.dim]
+    face_num_points = mesh0.element.face_entities[mesh0.dim]
 
-    with tempfile.TemporaryDirectory() as tmp:
-        if type(mesh) == str:  # reading msh file directly
-            vtk_filename = os.path.join(tmp, "tmpMsh.msh")
-            shutil.copyfile(mesh, vtk_filename)
-        else:  # reading mesh by creating vtk
-            vtk_filename = os.path.join(tmp, "tmpMsh.vtk")
-            mesh.vtk(vtk_filename)
+    cell_points_list = []
+    for cell_id in range(cStart, cEnd):
+        cell_points = mesh0.dm.getTransitiveClosure(cell_id)[0][-cell_num_points:]
+        cell_points_list.append(cell_points - pStart)
 
-        pvmesh = pv.read(vtk_filename)
+    cells_array = np.array(cell_points_list, dtype=int)
+    cells_size = np.full((cells_array.shape[0], 1), cell_num_points, dtype=int)
+    cells_type = np.full((cells_array.shape[0], 1), vtk_cell_type, dtype=int)
 
-    return pvmesh
+    cells_array = np.hstack((cells_size, cells_array), dtype=int)
+
+    pv_mesh = pv.UnstructuredGrid(
+        cells_array, cells_type, coords_to_pv_coords(mesh0.data)
+    )
+
+    return pv_mesh
 
 
 def coords_to_pv_coords(coords):
@@ -151,7 +189,7 @@ def scalar_fn_to_pv_points(pv_mesh, uw_fn, dim=None, simplify=True):
         dim = 3
 
     coords = pv_mesh.points[:, 0:dim]
-    scalar_values = uw.function.evalf(uw_fn, coords)
+    scalar_values = uw.function.evaluate(uw_fn, coords, evalf=True)
 
     return scalar_values
 
@@ -172,7 +210,7 @@ def vector_fn_to_pv_points(pv_mesh, uw_fn, dim=None, simplify=True):
     coords = pv_mesh.points[:, 0:dim]
 
     vector_values = np.zeros_like(pv_mesh.points)
-    vector_values[:, 0:dim] = uw.function.evalf(uw_fn, coords)
+    vector_values[:, 0:dim] = uw.function.evaluate(uw_fn, coords, evalf=True)
 
     return vector_values
 
@@ -190,7 +228,7 @@ def vector_fn_to_pv_points(pv_mesh, uw_fn, dim=None, simplify=True):
 
 #    vector_values = np.zeros_like(coords)
 #    for i in range(0, dim):
-#        vector_values[:, i] = uw.function.evalf(uw_fn[i], coords)
+#        vector_values[:, i] = uw.function.evaluate(uw_fn[i], coords, evalf=True)
 
 #    return vector_values
 
@@ -243,7 +281,6 @@ def plot_mesh(
     save_png=False,
     dir_fname="",
 ):
-
     """
     Plot a mesh with optional clipping, edge display, and saving functionality.
@@ -330,7 +367,6 @@ def plot_scalar(
     save_png=False,
     dir_fname="",
 ):
-
     """
     Plot a scalar quantity from a mesh with options for clipping, colormap, and saving.
@@ -463,7 +499,6 @@ def plot_vector(
     scalar=None,
     scalar_name="",
 ):
-
     """
     Plot a vector quantity from a mesh with options for clipping, colormap, vector magnitude, and saving.
@@ -557,9 +592,7 @@ def plot_vector(
             pvmesh, sympy.sqrt(vector.sym.dot(vector.sym))
         )
     else:
-        pvmesh.point_data[scalar_name] = scalar_fn_to_pv_points(
-            pvmesh, scalar.sym
-        )
+        pvmesh.point_data[scalar_name] = scalar_fn_to_pv_points(pvmesh, scalar.sym)
 
     print(pvmesh.point_data[scalar_name].min(), pvmesh.point_data[scalar_name].max())
 
@@ -634,7 +667,6 @@ def save_colorbar(
     output_path="",
     fname="",
 ):
-
     """
    Save a colorbar separately from a plot with customizable appearance and format.
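[Editor's note] The `mesh_to_pv_mesh` rewrite above assembles the VTK connectivity arrays directly instead of round-tripping through a temporary `.vtk` file. It relies on the DMPlex transitive closure listing a cell's vertex points last, hence the `[-cell_num_points:]` slice. Below is a minimal, self-contained sketch of the cell layout that `pv.UnstructuredGrid` expects, assuming a recent pyvista with the `pv.CellType` enum; the single-quad example is illustrative only and not part of the patch:

    import numpy as np
    import pyvista as pv

    # Four corners of one unit quad (VTK points are always 3D).
    points = np.array(
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    )

    # Each cell is encoded as [n_points, id0, id1, ...]; a parallel array holds
    # the VTK cell type - the same layout mesh_to_pv_mesh builds with np.hstack.
    cells = np.array([4, 0, 1, 2, 3])
    celltypes = np.array([pv.CellType.QUAD], dtype=np.uint8)

    grid = pv.UnstructuredGrid(cells, celltypes, points)
    assert grid.n_cells == 1 and grid.n_points == 4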
diff --git a/test.sh b/test.sh
index 41114573..4eeea66d 100755
--- a/test.sh
+++ b/test.sh
@@ -15,7 +15,6 @@ PYTEST="pytest --config-file=tests/pytest.ini"
 $PYTEST tests/test_00[0-4]*py || status=1
 #$PYTEST tests/test_0050*py || status=1 # disable auditor test for now
-
 # Spatial / calculation tests
 $PYTEST tests/test_01*py tests/test_05*py tests/test_06*py || status=1
diff --git a/tests/test_0005_check_xdmf.py b/tests/dont_test_0005_check_xdmf.py
similarity index 100%
rename from tests/test_0005_check_xdmf.py
rename to tests/dont_test_0005_check_xdmf.py
diff --git a/tests/test_0004_pointwise_fns.py b/tests/test_0004_pointwise_fns.py
index 1ba8d087..933e1543 100644
--- a/tests/test_0004_pointwise_fns.py
+++ b/tests/test_0004_pointwise_fns.py
@@ -111,38 +111,44 @@ def test_getext_sympy_fns():
     )
 
-
-# TODO this test is failing after LM changes - DISABLING for now. JG, 26-05-25.
-#def test_getext_meshVar():
-#
-#    res_fn = sympy.ImmutableDenseMatrix([v.sym[0], w.sym])
-#    jac_fn = sympy.ImmutableDenseMatrix([x * v.sym[0], y**2])
-#    bc_fn = sympy.ImmutableDenseMatrix([v.sym[1] * sympy.sin(x), sympy.cos(y)])
-#    bd_res_fn = sympy.ImmutableDenseMatrix([sympy.log(v.sym[0]), sympy.exp(w.sym)])
-#    bd_jac_fn = sympy.ImmutableDenseMatrix(
-#        [sympy.diff(sympy.log(v.sym[0]), y), sympy.diff(sympy.exp(y * x), y)]
-#    )
-#
-#    with uw.utilities.CaptureStdout(split=True) as captured_setup_solver:
-#        compiled_extns, dictionaries = getext(
-#            mesh,
-#            [res_fn, res_fn],
-#            [jac_fn],
-#            [bc_fn],
-#            [bd_res_fn],
-#            [bd_jac_fn],
-#            mesh.vars.values(),
-#            verbose=True,
-#            debug=True,
-#            debug_name="TEST_2",
-#            cache=False,
-#        )
-#
-#    assert os.path.exists(f"/tmp/fn_ptr_ext_TEST_2")
-#    assert os.path.exists(f"/tmp/fn_ptr_ext_TEST_2/cy_ext.h")
-#    assert (
-#        r"Processing JIT 5 / Matrix([[{\mathbf{v}}_{ 0,1}(N.x, N.y)/{\mathbf{v}}_{ 0 }(N.x, N.y)], [N.x*exp(N.x*N.y)]])"
-#        in captured_setup_solver
-#    )
+
+def test_getext_meshVar():
+
+    res_fn = sympy.ImmutableDenseMatrix([v.sym[0], w.sym])
+    jac_fn = sympy.ImmutableDenseMatrix([x * v.sym[0], y**2])
+    bc_fn = sympy.ImmutableDenseMatrix([v.sym[1] * sympy.sin(x), sympy.cos(y)])
+    bd_res_fn = sympy.ImmutableDenseMatrix([sympy.log(v.sym[0]), sympy.exp(w.sym)])
+    bd_jac_fn = sympy.ImmutableDenseMatrix(
+        [sympy.diff(sympy.log(v.sym[0]), y), sympy.diff(sympy.exp(y * x), y)]
+    )
+
+    with uw.utilities.CaptureStdout(split=True) as captured_setup_solver:
+        compiled_extns, dictionaries = getext(
+            mesh,
+            [res_fn, res_fn],
+            [jac_fn],
+            [bc_fn],
+            [bd_res_fn],
+            [bd_jac_fn],
+            mesh.vars.values(),
+            verbose=True,
+            debug=True,
+            debug_name="TEST_2",
+            cache=False,
+        )
+
+    print(captured_setup_solver)
+    print(len(captured_setup_solver))
+
+    # Notes on this test - it would be better to match appropriate substrings among the outputs in the list,
+    # because the various disambiguation strings (\\hspace{}) may change if other tests are added.
+
+    assert os.path.exists(f"/tmp/fn_ptr_ext_TEST_2")
+    assert os.path.exists(f"/tmp/fn_ptr_ext_TEST_2/cy_ext.h")
+    assert (
+        "Processing JIT 5 / Matrix([[{ \\hspace{ 0.02pt } {\\mathbf{v}} }_{ 0,1}(N.x, N.y)/{ \\hspace{ 0.02pt } {\\mathbf{v}} }_{ 0 }(N.x, N.y)], [N.x*exp(N.x*N.y)]])"
+        in captured_setup_solver
+    )
 
 
 # def test_build_functions():
diff --git a/tests/test_0101_kdtree.py b/tests/test_0101_kdtree.py
index 1c52c064..4de1c40a 100644
--- a/tests/test_0101_kdtree.py
+++ b/tests/test_0101_kdtree.py
@@ -14,6 +14,7 @@
 coords[0] = (0.5,) * 3
 test_single_data.append((100000, 3, coords))
+
 # Single coord test
 @pytest.mark.parametrize("n,dim,coords", test_single_data)
 def test_single_coord(n, dim, coords):
@@ -25,20 +26,20 @@
 
     # Use brute force to find closest point
     repeatcoords = np.repeat(coords, n, 0)
-    diff = pts - repeatcoords[:, 0:dim]
-    brute_dist = np.sqrt( np.sum( np.multiply(diff, diff), 1) )
-    brute_id = np.argmin(brute_dist)
+    diff = pts - repeatcoords[:, 0:dim]
+    brute_dist = np.sqrt(np.sum(np.multiply(diff, diff), 1))
+    brute_id = np.argmin(brute_dist)
 
     # Build our index
     index = KDTree(pts)
 
     # Use KDTree to find closest point to a coord
     kd_dist, kd_id = index.query(coords)
 
-    assert np.any( kd_id[0]>index.n ) == False, (
-        "Some point weren't found. Error" )
+    assert np.any(kd_id[0] > index.n) == False, "Some points weren't found."
 
-    assert kd_id[0] == brute_id, (
-        "KDTree and brute force method did not find the same point." )
+    assert (
+        kd_id[0] == brute_id
+    ), "KDTree and brute force method did not find the same point."
 
     assert np.allclose(kd_dist[0], brute_dist[brute_id]), (
         "KDTree and Numpy did not find the same distance squared.\n"
@@ -62,7 +63,7 @@ def test_self_points(n, dim):
 
     # Use KDTree to find closest point to a coord
     (dist, kdpt) = index.query(pts)
-    assert np.any(kdpt>index.n) == False, "Some point weren't found. Error"
+    assert np.any(kdpt > index.n) == False, "Some points weren't found."
 
     # `find_closest_point` should return index of pts.
     assert np.allclose(np.arange(n), kdpt), "Point indices weren't as expected."
     # Distance to self should be zero!
@@ -89,9 +90,9 @@ def test_mesh_verts(res, dim):
     coords = mesh.data.copy() + 0.5 * elsize * np.random.random(mesh.data.shape)
 
     (dist, kdpt) = index.query(coords)
-    assert np.any(kdpt>index.n) == False, "Some point weren't found. Error"
+    assert np.any(kdpt > index.n) == False, "Some points weren't found."
 
-    #assert np.allclose(True, found), "All points should have been found."
+    # assert np.allclose(True, found), "All points should have been found."
     # `find_closest_point` should return index of pts.
     assert np.allclose(
         np.arange(mesh.data.shape[0]), kdpt
@@ -103,8 +104,8 @@
 
 
 # Mesh centroid test
-@pytest.mark.parametrize("res,dim,ppcell", [(16, 2, 4), (16, 3, 4)])
-def test_mesh_centroid(res, dim, ppcell):
+@pytest.mark.parametrize("res,dim,fill_param", [(16, 2, 4), (16, 3, 4)])
+def test_mesh_centroid(res, dim, fill_param):
     """
     Generate a mesh and create the kd index
     using the mesh centroids. Generate a swarm,
     populate it, and check that each particle's
     nearest centroid is the centroid of its
     owning cell.
     """
     mesh = uw.meshing.StructuredQuadBox(elementRes=(res,) * dim)
+    swarm = uw.swarm.Swarm(mesh)
+    swarm.populate(fill_param=fill_param)
+
+    pts_per_cell = swarm.dm.getSize() // mesh._centroids.shape[0]
+
     centroids = np.zeros((res**dim, dim))
-    for index in range(res**dim):
-        centroids[index] = mesh.dm.computeCellGeometryFVM(index)[1]
+    cellid = np.zeros((res**dim * pts_per_cell), dtype=int)
 
-    index = KDTree(centroids)
+    for index in range(res**dim):
+        centroids[index] = mesh._centroids[index]
+        for i in range(pts_per_cell):
+            cellid[index * pts_per_cell + i] = index
 
-    # add and populate swarm
-    swarm = uw.swarm.Swarm(mesh)
-    swarm.populate(fill_param=ppcell)
+    index = uw.kdtree.KDTree(centroids)
 
     with swarm.access():
         dist, kdpt = index.query(swarm.data)
 
-    assert np.any(kdpt>index.n) == False, "Some point weren't found. Error"
-    # `find_closest_point` should return index of pts.
-    assert np.allclose(
-        swarm.particle_cellid.data[:, 0], kdpt
-    ), "Point indices weren't as expected."
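+    # (The nearest centroid to each particle should be that of its owning
+    #  cell, which is exactly what the brute-force `cellid` array above encodes.)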
+    assert np.any(kdpt > index.n) == False, "Some points weren't found."
+    # `find_closest_point` should return index of pts.
+    assert np.allclose(cellid, kdpt), "Point indices weren't as expected."
diff --git a/tests/test_0503_evaluate.py b/tests/test_0503_evaluate.py
index e3e90d87..37f067b5 100644
--- a/tests/test_0503_evaluate.py
+++ b/tests/test_0503_evaluate.py
@@ -33,12 +33,31 @@ def tensor_product(order, val1, val2):
 
 def test_non_uw_variable_constant():
     mesh = uw.meshing.StructuredQuadBox()
-    result = fn.evaluate(sympy.sympify(1.5), coords, coord_sys=mesh.N)
+    result = fn.evaluate(
+        sympy.sympify(1.5),
+        coords,
+        coord_sys=mesh.N,
+        verbose=True,
+    )
 
     assert np.allclose(1.5, result, rtol=1e-05, atol=1e-08)
     del mesh
 
 
+def test_non_uw_variable_constant_evalf():
+    mesh = uw.meshing.StructuredQuadBox()
+    result = fn.evaluate(
+        sympy.sympify(1.5),
+        coords,
+        coord_sys=mesh.N,
+        evalf=True,
+        verbose=True,
+    )
+
+    assert np.allclose(1.5, result, rtol=1e-05, atol=1e-08)
+    del mesh
+
+
 def test_non_uw_variable_linear():
     mesh = uw.meshing.StructuredQuadBox()
     result = fn.evaluate(mesh.r[0], coords, coord_sys=mesh.N)
@@ -58,12 +77,12 @@ def test_non_uw_variable_sine():
 def test_single_scalar_variable():
     mesh = uw.meshing.StructuredQuadBox()
     var = uw.discretisation.MeshVariable(
-        varname="scalar_var", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
+        varname="scalar_var_3", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
     )
     with mesh.access(var):
         var.data[:] = 1.1
-    result = fn.evaluate(var.sym[0], coords)
+    result = fn.evaluate(var.sym[0], coords, evalf=True)
 
     assert np.allclose(1.1, result, rtol=1e-05, atol=1e-08)
     del mesh
 
@@ -72,196 +91,11 @@ def test_single_vector_variable():
     mesh = uw.meshing.StructuredQuadBox()
     var = uw.discretisation.MeshVariable(
-        varname="vector_var", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
+        varname="vector_var_4", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
     )
     with mesh.access(var):
         var.data[:] = (1.1, 1.2)
-    result = uw.function.evaluate(var.fn, coords)
+    result = uw.function.evaluate(var.sym, coords, evalf=True)
 
     assert np.allclose(np.array(((1.1, 1.2),)), result, rtol=1e-05, atol=1e-08)
     del mesh
-
-
-def test_scalar_vector_mult():
-    mesh = uw.meshing.StructuredQuadBox()
-    var_scalar = uw.discretisation.MeshVariable(
-        varname="var_scalar", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
-    )
-    var_vector = uw.discretisation.MeshVariable(
-        varname="var_vector", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
-    )
-    with mesh.access(var_scalar, var_vector):
-        var_scalar.data[:] = 3.0
-        var_vector.data[:, :] = (4.0, 5.0)
-
-    result = uw.function.evaluate(var_scalar.sym[0] * var_vector.sym[0], coords)
-    assert np.allclose(np.array(((12.0),)), result, rtol=1e-05, atol=1e-08)
-    result = uw.function.evaluate(var_scalar.sym[0] * var_vector.sym[1], coords)
-    assert np.allclose(np.array(((15.0),)), result, rtol=1e-05, atol=1e-08)
-
-    del mesh
-
-
-def test_vector_dot_product():
-    mesh = uw.meshing.StructuredQuadBox()
-    var_vector1 = uw.discretisation.MeshVariable(
-        varname="var_vector1", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
-    )
-    var_vector2 = uw.discretisation.MeshVariable(
-        varname="var_vector2", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
-    )
-    with mesh.access(var_vector1, var_vector2):
-        var_vector1.data[:] = (1.0, 2.0)
-        var_vector2.data[:] = (3.0, 4.0)
-    result = uw.function.evaluate(var_vector1.fn.dot(var_vector2.fn), coords)
-    assert np.allclose(11.0, result,
rtol=1e-05, atol=1e-08) - - del mesh - - -# that test needs to be able to take degree as a parameter... -def test_polynomial_mesh_var_degree(): - mesh = uw.meshing.StructuredQuadBox() - maxdegree = 10 - vars = [] - - # Create required vars of different degree. - for degree in range(maxdegree + 1): - vars.append( - uw.discretisation.MeshVariable( - varname="var" + str(degree), - mesh=mesh, - num_components=1, - vtype=uw.VarType.SCALAR, - degree=degree, - ) - ) - - # Set variable data to represent polynomial function. - for var in vars: - with mesh.access(var): - vcoords = var.coords - var.data[:, 0] = tensor_product(var.degree, vcoords[:, 0], vcoords[:, 1]) - - # Test that interpolated variables reproduce exactly polymial function of associated degree. - for var in vars: - result = uw.function.evaluate(var.fn, coords, mesh.N) - assert np.allclose( - tensor_product(var.degree, coords[:, 0], coords[:, 1]), - result, - rtol=1e-05, - atol=1e-08, - ) - - -def test_many_many_scalar_mult_var(): - mesh = uw.meshing.StructuredQuadBox() - # Note that this test fails for n>~15. Something something subdm segfault. - # Must investigate. - nn = 15 - vars = [] - for i in range(nn): - vars.append( - uw.discretisation.MeshVariable( - varname=f"var_{i}", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR - ) - ) - factorial = 1.0 - with mesh.access(*vars): - for i, var in enumerate(vars): - var.data[:] = float(i) - factorial *= float(i) - multexpr = vars[0].fn - for var in vars[1:]: - multexpr *= var.fn - result = uw.function.evaluate(multexpr, coords) - assert np.allclose(factorial, result, rtol=1e-05, atol=1e-08) - - -# Let's now do the same, but instead do it Sympy wise. -# We don't really need any UW infrastructure for this test, but it's useful -# to stress our `evaluate()` function. It should however simply reduce -# to Sympy's `lambdify` routine. - - -def test_polynomial_sympy(): - degree = 20 - mesh = uw.meshing.StructuredQuadBox() - assert np.allclose( - tensor_product(degree, coords[:, 0], coords[:, 1]), - uw.function.evaluate( - tensor_product(degree, mesh.r[0], mesh.r[1]), coords, coord_sys=mesh.N - ), - rtol=1e-05, - atol=1e-08, - ) - - -# Now we'll do something similar but involve UW variables. -# Instead of using the Sympy symbols for (x,y), we'll set the -# coordinate locations on the var data itself. -# For a cartesian mesh, linear elements will suffice. -# We'll also do it twice, once using (xvar,yvar), and -# another time using (xyvar[0], xyvar[1]). - - -def test_polynomial_mesh_var_sympy(): - mesh = uw.meshing.StructuredQuadBox() - xvar = uw.discretisation.MeshVariable( - varname="xvar", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR - ) - yvar = uw.discretisation.MeshVariable( - varname="yvar", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR - ) - xyvar = uw.discretisation.MeshVariable( - varname="xyvar", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR - ) - with mesh.access(xvar, yvar, xyvar): - # Note that all the `coords` arrays should actually reduce to an identical array, - # as all vars have identical degree and layout. 
- xvar.data[:, 0] = xvar.coords[:, 0] - yvar.data[:, 0] = yvar.coords[:, 1] - xyvar.data[:] = xyvar.coords[:] - degree = 10 - assert np.allclose( - tensor_product(degree, coords[:, 0], coords[:, 1]), - uw.function.evaluate(tensor_product(degree, xvar.fn, yvar.fn), coords), - rtol=1e-05, - atol=1e-08, - ) - assert np.allclose( - tensor_product(degree, coords[:, 0], coords[:, 1]), - uw.function.evaluate( - tensor_product(degree, xyvar.fn.dot(mesh.N.i), xyvar.fn.dot(mesh.N.j)), - coords, - ), - rtol=1e-05, - atol=1e-08, - ) - - -# NOTE THAT WE NEEDED TO DISABLE MESH HASHING FOR 3D MESH FOR SOME REASON. -# CHECK `DMInterpolationSetUp_UW()` FOR DETAILS. -def test_3d_cross_product(): - # Create a set of evaluation coords in 3d - n = 10 - x = np.linspace(0.1, 0.9, n) - y = np.linspace(0.2, 0.8, n) - z = np.linspace(0.3, 0.7, n) - xv, yv, zv = np.meshgrid(x, y, z, sparse=True) - coords = np.vstack((xv[0, :, 0], yv[:, 0, 0], zv[0, 0, :])).T - - # Now mesh and vars etc. - mesh = uw.meshing.StructuredQuadBox(elementRes=(4,) * 3) - name = "vector cross product test" - var_vector1 = uw.discretisation.MeshVariable( - varname="var_vector1", mesh=mesh, num_components=3, vtype=uw.VarType.VECTOR - ) - var_vector2 = uw.discretisation.MeshVariable( - varname="var_vector2", mesh=mesh, num_components=3, vtype=uw.VarType.VECTOR - ) - with mesh.access(var_vector1, var_vector2): - var_vector1.data[:] = (1.0, 2.0, 3.0) - var_vector2.data[:] = (4.0, 5.0, 6.0) - result = uw.function.evaluate(var_vector1.fn.cross(var_vector2.fn), coords) - assert np.allclose(np.array(((-3, 6, -3),)), result, rtol=1e-05, atol=1e-08) diff --git a/tests/test_0503_evaluate2.py b/tests/test_0503_evaluate2.py new file mode 100644 index 00000000..e54df8de --- /dev/null +++ b/tests/test_0503_evaluate2.py @@ -0,0 +1,258 @@ +import os + +# DISABLE SYMPY CACHE, AS IT GETS IN THE WAY FOR IDENTICALLY NAMED VARIABLES. +# NEED TO FIX. 
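+# (If the cache disable below is re-enabled, it must happen before sympy is
+#  first imported - sympy reads SYMPY_USE_CACHE at import time.)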
+ +# os.environ["SYMPY_USE_CACHE"] = "no" +import underworld3 as uw +import underworld3.function as fn +import numpy as np +import sympy +import pytest + + +n = 10 +x = np.linspace(0.1, 0.9, n) +y = np.linspace(0.2, 0.8, n) +xv, yv = np.meshgrid(x, y, sparse=True) +coords = np.vstack((xv[0, :], yv[:, 0])).T + + +def tensor_product(order, val1, val2): + sum = 0.0 + order += 1 + for i in range(order): + for j in range(order): + sum += val1**i * val2**j + return sum + + +def test_number_vector_mult(): + mesh = uw.meshing.StructuredQuadBox() + var_vector = uw.discretisation.MeshVariable( + varname="var_vector_1", + mesh=mesh, + num_components=2, + vtype=uw.VarType.VECTOR, + varsymbol=r"V", + ) + with mesh.access(var_vector): + var_vector.data[:, :] = (4.0, 5.0) + + result = uw.function.evaluate(3 * var_vector.sym[0], coords) + assert np.allclose(np.array(((12.0),)), result, rtol=1e-05, atol=1e-08) + result = uw.function.evaluate(3 * var_vector.sym[1], coords) + assert np.allclose(np.array(((15.0),)), result, rtol=1e-05, atol=1e-08) + + del mesh + + +def test_number_vector_mult_evalf(): + mesh = uw.meshing.StructuredQuadBox() + var_vector = uw.discretisation.MeshVariable( + varname="var_vector_1", + mesh=mesh, + num_components=2, + vtype=uw.VarType.VECTOR, + varsymbol=r"V", + ) + with mesh.access(var_vector): + var_vector.data[:, :] = (4.0, 5.0) + + result = uw.function.evaluate(3 * var_vector.sym[0], coords, evalf=True) + assert np.allclose(np.array(((12.0),)), result, rtol=1e-05, atol=1e-08) + result = uw.function.evaluate(3 * var_vector.sym[1], coords, evalf=True) + assert np.allclose(np.array(((15.0),)), result, rtol=1e-05, atol=1e-08) + + del mesh + + +def test_scalar_vector_mult(): + mesh = uw.meshing.StructuredQuadBox() + var_scalar = uw.discretisation.MeshVariable( + varname="var_scalar_2", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR + ) + var_vector = uw.discretisation.MeshVariable( + varname="var_vector_2", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR + ) + with mesh.access(var_scalar, var_vector): + var_scalar.data[:] = 3.0 + var_vector.data[:, :] = (4.0, 5.0) + + result = uw.function.evaluate(var_scalar.sym[0] * var_vector.sym[0], coords) + assert np.allclose(np.array(((12.0),)), result, rtol=1e-05, atol=1e-08) + result = uw.function.evaluate(var_scalar.sym[0] * var_vector.sym[1], coords) + assert np.allclose(np.array(((15.0),)), result, rtol=1e-05, atol=1e-08) + + del mesh + + +def test_vector_dot_product(): + mesh = uw.meshing.StructuredQuadBox() + var_vector1 = uw.discretisation.MeshVariable( + varname="var_vector1", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR + ) + var_vector2 = uw.discretisation.MeshVariable( + varname="var_vector2", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR + ) + with mesh.access(var_vector1, var_vector2): + var_vector1.data[:] = (1.0, 2.0) + var_vector2.data[:] = (3.0, 4.0) + result = uw.function.evaluate( + var_vector1.sym.dot(var_vector2.sym), coords, evalf=True + ) + assert np.allclose(11.0, result, rtol=1e-05, atol=1e-08) + + del mesh + + +# that test needs to be able to take degree as a parameter... +def test_polynomial_mesh_var_degree(): + mesh = uw.meshing.StructuredQuadBox() + maxdegree = 10 + vars = [] + + # Create required vars of different degree. + for degree in range(maxdegree + 1): + vars.append( + uw.discretisation.MeshVariable( + varname="var" + str(degree), + mesh=mesh, + num_components=1, + vtype=uw.VarType.SCALAR, + degree=degree, + ) + ) + + # Set variable data to represent polynomial function. 
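+    # (A degree-p nodal variable on this quad mesh spans the tensor-product
+    #  space Q_p, so it can represent tensor_product(p, x, y) exactly and the
+    #  evaluations below should match to round-off.)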
+    for var in vars:
+        with mesh.access(var):
+            vcoords = var.coords
+            var.data[:, 0] = tensor_product(var.degree, vcoords[:, 0], vcoords[:, 1])
+
+    # Test that interpolated variables exactly reproduce the polynomial function of the associated degree.
+    for var in vars:
+        result = uw.function.evaluate(var.sym[0], coords)
+        assert np.allclose(
+            tensor_product(var.degree, coords[:, 0], coords[:, 1]),
+            result,
+            rtol=1e-05,
+            atol=1e-08,
+        )
+
+
+def test_many_many_scalar_mult_var():
+    mesh = uw.meshing.StructuredQuadBox()
+    # Note that this test fails for n>~15. Something something subdm segfault.
+    # Must investigate.
+    nn = 15
+    vars = []
+    for i in range(nn):
+        vars.append(
+            uw.discretisation.MeshVariable(
+                varname=f"var_{i}", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
+            )
+        )
+    factorial = 1.0
+    with mesh.access(*vars):
+        for i, var in enumerate(vars):
+            var.data[:] = float(i)
+            factorial *= float(i)
+    multexpr = vars[0].fn
+    for var in vars[1:]:
+        multexpr *= var.fn
+    result = uw.function.evaluate(multexpr, coords)
+    assert np.allclose(factorial, result, rtol=1e-05, atol=1e-08)
+
+
+# Let's now do the same, but instead do it Sympy-wise.
+# We don't really need any UW infrastructure for this test, but it's useful
+# to stress our `evaluate()` function. It should however simply reduce
+# to Sympy's `lambdify` routine.
+
+
+def test_polynomial_sympy():
+    degree = 20
+    mesh = uw.meshing.StructuredQuadBox()
+    assert np.allclose(
+        tensor_product(degree, coords[:, 0], coords[:, 1]),
+        uw.function.evaluate(
+            tensor_product(degree, mesh.r[0], mesh.r[1]),
+            coords,
+            coord_sys=mesh.N,
+        ),
+        rtol=1e-05,
+        atol=1e-08,
+    )
+
+
+# Now we'll do something similar but involve UW variables.
+# Instead of using the Sympy symbols for (x,y), we'll set the
+# coordinate locations on the var data itself.
+# For a cartesian mesh, linear elements will suffice.
+# We'll also do it twice, once using (xvar,yvar), and
+# another time using (xyvar[0], xyvar[1]).
+
+
+def test_polynomial_mesh_var_sympy():
+    mesh = uw.meshing.StructuredQuadBox()
+    xvar = uw.discretisation.MeshVariable(
+        varname="xvar", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
+    )
+    yvar = uw.discretisation.MeshVariable(
+        varname="yvar", mesh=mesh, num_components=1, vtype=uw.VarType.SCALAR
+    )
+    xyvar = uw.discretisation.MeshVariable(
+        varname="xyvar", mesh=mesh, num_components=2, vtype=uw.VarType.VECTOR
+    )
+    with mesh.access(xvar, yvar, xyvar):
+        # Note that all the `coords` arrays should actually reduce to an identical array,
+        # as all vars have identical degree and layout.
+        xvar.data[:, 0] = xvar.coords[:, 0]
+        yvar.data[:, 0] = yvar.coords[:, 1]
+        xyvar.data[:] = xyvar.coords[:]
+    degree = 10
+    assert np.allclose(
+        tensor_product(degree, coords[:, 0], coords[:, 1]),
+        uw.function.evaluate(tensor_product(degree, xvar.fn, yvar.fn), coords),
+        rtol=1e-05,
+        atol=1e-08,
+    )
+    assert np.allclose(
+        tensor_product(degree, coords[:, 0], coords[:, 1]),
+        uw.function.evaluate(
+            tensor_product(degree, xyvar.fn.dot(mesh.N.i), xyvar.fn.dot(mesh.N.j)),
+            coords,
+        ),
+        rtol=1e-05,
+        atol=1e-08,
+    )
+
+
+# NOTE THAT WE NEEDED TO DISABLE MESH HASHING FOR 3D MESH FOR SOME REASON.
+# CHECK `DMInterpolationSetUp_UW()` FOR DETAILS.
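+# For the constant fields used below, (1, 2, 3) x (4, 5, 6) = (-3, 6, -3),
+# which is the value the final assert checks at every evaluation point.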
+ + +def test_3d_cross_product(): + # Create a set of evaluation coords in 3d + n = 10 + x = np.linspace(0.1, 0.9, n) + y = np.linspace(0.2, 0.8, n) + z = np.linspace(0.3, 0.7, n) + xv, yv, zv = np.meshgrid(x, y, z, sparse=True) + coords = np.vstack((xv[0, :, 0], yv[:, 0, 0], zv[0, 0, :])).T + + # Now mesh and vars etc. + mesh = uw.meshing.StructuredQuadBox(elementRes=(4,) * 3) + name = "vector cross product test" + var_vector1 = uw.discretisation.MeshVariable( + varname="var_vector1", mesh=mesh, num_components=3, vtype=uw.VarType.VECTOR + ) + var_vector2 = uw.discretisation.MeshVariable( + varname="var_vector2", mesh=mesh, num_components=3, vtype=uw.VarType.VECTOR + ) + with mesh.access(var_vector1, var_vector2): + var_vector1.data[:] = (1.0, 2.0, 3.0) + var_vector2.data[:] = (4.0, 5.0, 6.0) + result = uw.function.evaluate(var_vector1.fn.cross(var_vector2.fn), coords) + assert np.allclose(np.array(((-3, 6, -3),)), result, rtol=1e-05, atol=1e-08) diff --git a/tests/test_0504_projections.py b/tests/test_0504_projections.py index 8bff1112..f65d8a30 100644 --- a/tests/test_0504_projections.py +++ b/tests/test_0504_projections.py @@ -45,7 +45,9 @@ def test_scalar_projection(): # Set the values on the swarm variable with swarm.access(s_values): - s_values.data[:, 0] = uw.function.evalf(s_fn, swarm.data, coord_sys=mesh.N) + s_values.data[:, 0] = uw.function.evaluate( + s_fn, swarm.data, coord_sys=mesh.N, evalf=True + ) # Prepare projection of swarm values onto the mesh nodes. scalar_projection = uw.systems.Projection(mesh, s_soln) @@ -62,16 +64,20 @@ def test_vector_projection(): # Set the values on the swarm variable with swarm.access(v_values): - v_values.data[:, 0] = uw.function.evalf(s_fn_x, swarm.data, coord_sys=mesh.N) - v_values.data[:, 1] = uw.function.evalf(s_fn_y, swarm.data, coord_sys=mesh.N) + v_values.data[:, 0] = uw.function.evaluate( + s_fn_x, swarm.data, coord_sys=mesh.N, evalf=True + ) + v_values.data[:, 1] = uw.function.evaluate( + s_fn_y, swarm.data, coord_sys=mesh.N, evalf=True + ) vector_projection = uw.systems.Vector_Projection(mesh, v_soln) vector_projection.uw_function = v_values.sym vector_projection.smoothing = 1.0e-3 - vector_projection.add_dirichlet_bc((0.0,None), "Right") - vector_projection.add_dirichlet_bc((None,0.0), "Top") - vector_projection.add_dirichlet_bc((None,0.0), "Bottom") + vector_projection.add_dirichlet_bc((0.0, None), "Right") + vector_projection.add_dirichlet_bc((None, 0.0), "Top") + vector_projection.add_dirichlet_bc((None, 0.0), "Bottom") vector_projection.solve() @@ -82,7 +88,9 @@ def test_gradient_recovery(): fn = sympy.cos(4.0 * sympy.pi * x) with mesh.access(s_soln): - s_soln.data[:, 0] = uw.function.evalf(fn, s_soln.coords[:], coord_sys=mesh.N) + s_soln.data[:, 0] = uw.function.evaluate( + fn, s_soln.coords[:], coord_sys=mesh.N, evalf=True + ) scalar_projection = uw.systems.Projection(mesh, gradient) scalar_projection.uw_function = s_soln.sym.diff(x)[0]
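
[Editor's note] The change running through this whole patch is the retirement of direct `uw.function.evalf(expr, coords)` calls in favour of `uw.function.evaluate(expr, coords, evalf=True)`, so a single entry point selects between the two evaluation paths via a keyword (the solvers thread an `_evalf` flag down to their history terms for the same reason). A minimal usage sketch under that assumption follows; the mesh and coordinates are illustrative, and the expected result relies on `mesh.r[0]` interpolating the x-coordinate, as the linear test above exercises:

    import numpy as np
    import underworld3 as uw

    mesh = uw.meshing.StructuredQuadBox()
    coords = np.array([[0.25, 0.25], [0.75, 0.5]])

    # Previously: uw.function.evalf(mesh.r[0], coords)
    # Now: the same interpolation path, selected by a keyword argument.
    values = uw.function.evaluate(mesh.r[0], coords, coord_sys=mesh.N, evalf=True)
    assert np.allclose(values, coords[:, 0])  # expected: the x-coordinate at each point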