mesh.cc
1// LIC// ====================================================================
2// LIC// This file forms part of oomph-lib, the object-oriented,
3// LIC// multi-physics finite-element library, available
4// LIC// at http://www.oomph-lib.org.
5// LIC//
6// LIC// Copyright (C) 2006-2025 Matthias Heil and Andrew Hazel
7// LIC//
8// LIC// This library is free software; you can redistribute it and/or
9// LIC// modify it under the terms of the GNU Lesser General Public
10// LIC// License as published by the Free Software Foundation; either
11// LIC// version 2.1 of the License, or (at your option) any later version.
12// LIC//
13// LIC// This library is distributed in the hope that it will be useful,
14// LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16// LIC// Lesser General Public License for more details.
17// LIC//
18// LIC// You should have received a copy of the GNU Lesser General Public
19// LIC// License along with this library; if not, write to the Free Software
20// LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21// LIC// 02110-1301 USA.
22// LIC//
23// LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
24// LIC//
25// LIC//====================================================================
26// Non-inline member functions for general mesh classes
27
28#ifdef OOMPH_HAS_MPI
29#include "mpi.h"
30#endif
31
32#include <algorithm>
33#include <limits.h>
34#include <typeinfo>
35
36
37// oomph-lib headers
38#include "oomph_utilities.h"
39#include "mesh.h"
40#include "problem.h"
41#include "elastic_problems.h"
42#include "refineable_mesh.h"
43#include "triangle_mesh.h"
44#include "shape.h"
45
46namespace oomph
47{
48 //======================================================
49 /// The Steady Timestepper
50 //======================================================
51 Steady<0> Mesh::Default_TimeStepper;
52
53
54 //=======================================================================
55 /// Static boolean flag to control warning about mesh level timesteppers
56 //=======================================================================
57 bool Mesh::Suppress_warning_about_empty_mesh_level_time_stepper_function =
58 false;
59
60 //=======================================================================
61 /// Merge meshes.
62 /// Note: This simply merges the meshes' elements and nodes (ignoring
63 /// duplicates; no boundary information etc. is created).
64 //=======================================================================
65 void Mesh::merge_meshes(const Vector<Mesh*>& sub_mesh_pt)
66 {
67 // No boundary lookup scheme is set up for the combined mesh
68 Lookup_for_elements_next_boundary_is_setup = false;
69
70 // Number of submeshes
71 unsigned nsub_mesh = sub_mesh_pt.size();
72
73 // Initialise element, node and boundary counters for global mesh
74 unsigned long n_element = 0;
75 unsigned long n_node = 0;
76 unsigned n_bound = 0;
77
78 // Loop over submeshes and get total number of elements, nodes and
79 // boundaries
80 for (unsigned imesh = 0; imesh < nsub_mesh; imesh++)
81 {
82 n_element += sub_mesh_pt[imesh]->nelement();
83 n_node += sub_mesh_pt[imesh]->nnode();
84 n_bound += sub_mesh_pt[imesh]->nboundary();
85 }
86
87 // Reserve storage for element and node pointers
88 Element_pt.clear();
89 Element_pt.reserve(n_element);
90 Node_pt.clear();
91 Node_pt.reserve(n_node);
92
93 // Resize vector of vectors of nodes
94 Boundary_node_pt.clear();
95 Boundary_node_pt.resize(n_bound);
96
97 // Sets of pointers to elements and nodes (to exclude duplicates -- they
98 // shouldn't occur anyway but if they do, they must only be added
99 // once in the global mesh to avoid trouble in the timestepping)
100 std::set<GeneralisedElement*> element_set_pt;
101 std::set<Node*> node_set_pt;
102
103 // Counter for total number of boundaries in all the submeshes
104 unsigned ibound_global = 0;
105 // Loop over the number of submeshes
106 for (unsigned imesh = 0; imesh < nsub_mesh; imesh++)
107 {
108 // Loop over the elements of the submesh and add to vector
109 // duplicates are ignored
110 unsigned nel_before = 0;
111 unsigned long n_element = sub_mesh_pt[imesh]->nelement();
112 for (unsigned long e = 0; e < n_element; e++)
113 {
114 GeneralisedElement* el_pt = sub_mesh_pt[imesh]->element_pt(e);
115 element_set_pt.insert(el_pt);
116 // Was it a duplicate?
117 unsigned nel_now = element_set_pt.size();
118 if (nel_now == nel_before)
119 {
120 std::ostringstream warning_stream;
121 warning_stream << "WARNING: " << std::endl
122 << "Element " << e << " in submesh " << imesh
123 << " is a duplicate \n and was ignored when assembling"
124 << " combined mesh." << std::endl;
126 "Mesh::Mesh(const Vector<Mesh*>&)",
128 }
129 else
130 {
131 Element_pt.push_back(el_pt);
132 }
133 nel_before = nel_now;
134 }
135
136 // Loop over the nodes of the submesh and add to vector
137 // duplicates are ignored
138 unsigned nnod_before = 0;
139 unsigned long n_node = sub_mesh_pt[imesh]->nnode();
140 for (unsigned long n = 0; n < n_node; n++)
141 {
142 Node* nod_pt = sub_mesh_pt[imesh]->node_pt(n);
143 node_set_pt.insert(nod_pt);
144 // Was it a duplicate?
145 unsigned nnod_now = node_set_pt.size();
146 if (nnod_now == nnod_before)
147 {
148 std::ostringstream warning_stream;
150 << "WARNING: " << std::endl
151 << "Node " << n << " in submesh " << imesh
152 << " is a duplicate \n and was ignored when assembling "
153 << "combined mesh." << std::endl;
155 "Mesh::Mesh(const Vector<Mesh*>&)",
157 }
158 else
159 {
160 Node_pt.push_back(nod_pt);
161 }
162 nnod_before = nnod_now;
163 }
164
165 // Loop over the boundaries of the submesh
166 unsigned n_bound = sub_mesh_pt[imesh]->nboundary();
167 for (unsigned ibound = 0; ibound < n_bound; ibound++)
168 {
169 // Loop over the number of nodes on the boundary and add to the
170 // global vector
171 unsigned long n_bound_node = sub_mesh_pt[imesh]->nboundary_node(ibound);
172 for (unsigned long n = 0; n < n_bound_node; n++)
173 {
174 add_boundary_node(ibound_global,
175 sub_mesh_pt[imesh]->boundary_node_pt(ibound, n));
176 }
177 // Increase the number of the global boundary counter
178 ibound_global++;
179 }
180 } // End of loop over submeshes
181 }
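  // Usage sketch (illustrative addition, not part of the original source):
  // merging two existing meshes via the constructor Mesh(const Vector<Mesh*>&),
  // which calls this function; "bulk_mesh_pt" and "surface_mesh_pt" are
  // hypothetical placeholders for meshes built elsewhere.
  //
  //   Vector<Mesh*> sub_mesh_pt;
  //   sub_mesh_pt.push_back(bulk_mesh_pt);
  //   sub_mesh_pt.push_back(surface_mesh_pt);
  //   Mesh* combined_mesh_pt = new Mesh(sub_mesh_pt);
  //
  // The combined mesh stores pointers to the original elements/nodes;
  // no boundary lookup schemes are created for it.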
182
183
184 //========================================================
185 /// Remove the information about nodes stored on the
186 /// b-th boundary of the mesh
187 //========================================================
188 void Mesh::remove_boundary_nodes(const unsigned& b)
189 {
190 // Loop over all the nodes on the boundary and call
191 // their remove_from_boundary function
192 unsigned n_boundary_node = Boundary_node_pt[b].size();
193 for (unsigned n = 0; n < n_boundary_node; n++)
194 {
195 Boundary_node_pt[b][n]->remove_from_boundary(b);
196 }
197 // Clear the storage
198 Boundary_node_pt[b].clear();
199 }
200
201 //=================================================================
202 /// Remove all information about mesh boundaries
203 //================================================================
204 void Mesh::remove_boundary_nodes()
205 {
206 // Loop over each boundary call remove_boundary_nodes
207 unsigned n_bound = Boundary_node_pt.size();
208 for (unsigned b = 0; b < n_bound; b++)
209 {
210 remove_boundary_nodes(b);
211 }
212 // Clear the storage
213 Boundary_node_pt.clear();
214 }
215
216 //============================================================
217 /// Remove the node node_pt from the b-th boundary of the mesh
218 /// This function also removes the information from the Node
219 /// itself
220 //===========================================================
221 void Mesh::remove_boundary_node(const unsigned& b, Node* const& node_pt)
222 {
223 // Find the location of the node in the boundary
224 Vector<Node*>::iterator it = std::find(
225 Boundary_node_pt[b].begin(), Boundary_node_pt[b].end(), node_pt);
226 // If the node is on this boundary
227 if (it != Boundary_node_pt[b].end())
228 {
229 // Remove the node from the mesh's list of boundary nodes
230 Boundary_node_pt[b].erase(it);
231 // Now remove the node's boundary information
232 node_pt->remove_from_boundary(b);
233 }
234 // If not do nothing
235 }
236
237
238 //========================================================
239 /// Add the node node_pt to the b-th boundary of the mesh
240 /// This function also sets the boundary information in the
241 /// Node itself
242 //=========================================================
243 void Mesh::add_boundary_node(const unsigned& b, Node* const& node_pt)
244 {
245 // Tell the node that it's on boundary b.
246 // At this point, if the node is not a BoundaryNode, the function
247 // should throw an exception.
248 node_pt->add_to_boundary(b);
249
250 // Get the size of the Boundary_node_pt vector
251 unsigned nbound_node = Boundary_node_pt[b].size();
252 bool node_already_on_this_boundary = false;
253 // Loop over the vector
254 for (unsigned n = 0; n < nbound_node; n++)
255 {
256 // is the current node here already?
257 if (node_pt == Boundary_node_pt[b][n])
258 {
259 node_already_on_this_boundary = true;
260 }
261 }
262
263 // Add the base node pointer to the vector if it's not there already
264 if (!node_already_on_this_boundary)
265 {
266 Boundary_node_pt[b].push_back(node_pt);
267 }
268 }
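  // Usage sketch (illustrative addition): registering an existing node with
  // boundary 0 of a mesh; "mesh_pt" and "nod_pt" are hypothetical
  // placeholders. The call throws if nod_pt was not created as a
  // BoundaryNode (e.g. via FiniteElement::construct_boundary_node(...)).
  //
  //   mesh_pt->add_boundary_node(0, nod_pt);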
269
270
271 //=======================================================
272 /// Update nodal positions in response to changes in the domain shape.
273 /// Uses the FiniteElement::get_x(...) function for FiniteElements
274 /// and doesn't do anything for other element types.
275 /// If a MacroElement pointer has been set for a FiniteElement,
276 /// the MacroElement representation is used to update the
277 /// nodal positions; if not get_x(...) uses the FE interpolation
278 /// and thus leaves the nodal positions unchanged.
279 /// Virtual, so it can be overloaded by specific meshes,
280 /// such as AlgebraicMeshes or SpineMeshes.
281 /// Generally, this function updates the position of all nodes
282 /// in response to changes in the boundary position. For
283 /// SolidNodes it only applies the update to those SolidNodes
284 /// whose position is determined by the boundary position, unless
285 /// the bool flag is set to true.
286 //========================================================
287 void Mesh::node_update(const bool& update_all_solid_nodes)
288 {
289 // Get the current time
290 double t_start = TimingHelpers::timer();
291
292#ifdef PARANOID
293#ifdef OOMPH_HAS_MPI
294 // Paranoid check to throw an error if node update is called for elements
295 // with nonuniformly spaced nodes for which some masters are 'external'
296 for (unsigned long n = 0; n < nnode(); n++)
297 {
298 Node* nod_pt = Node_pt[n];
299 if (nod_pt->is_hanging())
300 {
301 // Loop over master nodes
302 unsigned nmaster = nod_pt->hanging_pt()->nmaster();
303 for (unsigned imaster = 0; imaster < nmaster; imaster++)
304 {
305 // Get pointer to master node
306 Node* master_nod_pt = nod_pt->hanging_pt()->master_node_pt(imaster);
307
308 // Get vector of all external halo nodes
309 Vector<Node*> external_halo_node_pt;
310 get_external_halo_node_pt(external_halo_node_pt);
311
312 // Search the external halo storage for this node
313 Vector<Node*>::iterator it = std::find(external_halo_node_pt.begin(),
314 external_halo_node_pt.end(),
315 master_nod_pt);
316
317 // Check if the node was found
318 if (it != external_halo_node_pt.end())
319 {
320 // Throw error because node update won't work
321 // It's ok to throw an error here because this function is
322 // overloaded for Algebraic and MacroElementNodeUpdate
323 // Meshes. This is only a problem for meshes of ordinary
324 // nodes.
325 std::ostringstream err_stream;
326
327 err_stream << "Calling node_update() for a mesh which contains"
328 << std::endl
329 << "master nodes which live in the external storage."
330 << std::endl
331 << "These nodes do not belong to elements on this"
332 << std::endl
333 << "processor and therefore cannot be updated locally."
334 << std::endl;
335
336 throw OomphLibError(err_stream.str(),
337 OOMPH_CURRENT_FUNCTION,
338 OOMPH_EXCEPTION_LOCATION);
339 }
340 }
341 }
342 }
343 // If we get to here then none of the masters of any of the nodes in the
344 // mesh live in the external storage, so we'll be fine if we carry on.
345#endif
346#endif
347
348 /// Local and global (Eulerian) coordinate
349 Vector<double> s;
350 Vector<double> r;
351
352 // NB: This repeats nodes a lot - surely it would be
353 // quicker to modify it so that it only does each node once,
354 // particularly in the update_all_solid_nodes=true case?
355 // Create a map to indicate whether or not we've updated a node already
356 std::map<Node*, bool> has_node_been_updated;
357
358 // How many nodes are there?
359 unsigned n_node = nnode();
360
361 // Loop over all Nodes
362 for (unsigned n = 0; n < n_node; n++)
363 {
364 // Get pointer to node
365 Node* nod_pt = node_pt(n);
366
367 // Initialise the boolean value associated with this node
368 has_node_been_updated[nod_pt] = false;
369 }
370
371 // Loop over all elements
372 unsigned nel = nelement();
373 for (unsigned e = 0; e < nel; e++)
374 {
375 // Try to cast to FiniteElement
376 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(element_pt(e));
377
378 // If it's a finite element we can proceed: FiniteElements have
379 // nodes and a get_x() function
380 if (el_pt != 0)
381 {
382 // Find out dimension of element = number of local coordinates
383 unsigned ndim_el = el_pt->dim();
384 s.resize(ndim_el);
385
386 // Loop over nodal points
387 unsigned n_node = el_pt->nnode();
388 for (unsigned j = 0; j < n_node; j++)
389 {
390 // Get pointer to node
391 Node* nod_pt = el_pt->node_pt(j);
392
393 // Get spatial dimension of node
394 unsigned ndim_node = nod_pt->ndim();
395 r.resize(ndim_node);
396
397 // For non-hanging nodes
398 if (!(nod_pt->is_hanging()))
399 {
400 // If we've not dealt with this Node yet
401 if (!has_node_been_updated[nod_pt])
402 {
403 // Get the position of the node
404 el_pt->local_coordinate_of_node(j, s);
405
406 // Get new position
407 el_pt->get_x(s, r);
408
409 // Try to cast to SolidNode
410 SolidNode* solid_node_pt = dynamic_cast<SolidNode*>(nod_pt);
411
412 // Loop over coordinate directions
413 for (unsigned i = 0; i < ndim_node; i++)
414 {
415 // It's a SolidNode:
416 if (solid_node_pt != 0)
417 {
418 // only do it if explicitly requested!
419 if (update_all_solid_nodes)
420 {
421 solid_node_pt->x(i) = r[i];
422 }
423 }
424 // Not a SolidNode: Definitely update
425 else
426 {
427 nod_pt->x(i) = r[i];
428 }
429 }
430
431 // Indicate that we're done with this node, regardless of whether
432 // or not it even needed updating
433 has_node_been_updated[nod_pt] = true;
434 } // if (!has_node_been_updated[nod_pt])
435 } // if (!(nod_pt->is_hanging()))
436 } // for (unsigned j=0;j<n_node;j++)
437 } // if (el_pt!=0)
438 } // for (unsigned e=0;e<nel;e++)
439
440 // Now update the external halo nodes before we adjust the positions of the
441 // hanging nodes in case any are masters of local nodes
442#ifdef OOMPH_HAS_MPI
443 // Loop over all external halo nodes with other processors
444 // and update them
445 for (std::map<unsigned, Vector<Node*>>::iterator it =
446 External_halo_node_pt.begin();
447 it != External_halo_node_pt.end();
448 it++)
449 {
450 // Get vector of external halo nodes
451 Vector<Node*> ext_halo_node_pt = (*it).second;
452 unsigned nnod = ext_halo_node_pt.size();
453 for (unsigned j = 0; j < nnod; j++)
454 {
455 ext_halo_node_pt[j]->node_update();
456 }
457 }
458#endif
459
460 // Now loop over hanging nodes and adjust their position
461 // in line with their hanging node constraints
462 for (unsigned long n = 0; n < n_node; n++)
463 {
464 Node* nod_pt = Node_pt[n];
465 if (nod_pt->is_hanging())
466 {
467 // Get spatial dimension of node
468 unsigned ndim_node = nod_pt->ndim();
469
470 // Initialise
471 for (unsigned i = 0; i < ndim_node; i++)
472 {
473 nod_pt->x(i) = 0.0;
474 }
475
476 // Loop over master nodes
477 unsigned nmaster = nod_pt->hanging_pt()->nmaster();
478 for (unsigned imaster = 0; imaster < nmaster; imaster++)
479 {
480 // Loop over directions
481 for (unsigned i = 0; i < ndim_node; i++)
482 {
483 nod_pt->x(i) +=
484 nod_pt->hanging_pt()->master_node_pt(imaster)->x(i) *
485 nod_pt->hanging_pt()->master_weight(imaster);
486 }
487 }
488 }
489 }
490
491 // Loop over all nodes again and execute auxiliary node update
492 // function
493 for (unsigned long n = 0; n < n_node; n++)
494 {
495 Node_pt[n]->perform_auxiliary_node_update_fct();
496 }
497
498 // Tell the user how long it's taken
499 oomph_info << "Time taken to update nodal positions [sec]: "
500 << TimingHelpers::timer() - t_start << std::endl;
501 }
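  // Usage sketch (illustrative addition): after the domain shape has
  // changed (e.g. because a GeomObject describing a curvilinear boundary
  // has been updated), the nodal positions can be brought back in line
  // with the boundary; "mesh_pt" is a hypothetical placeholder.
  //
  //   // Update free nodes and boundary-driven SolidNodes only
  //   mesh_pt->node_update();
  //
  //   // ...or force the update of all SolidNodes as well
  //   mesh_pt->node_update(true);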
502
503
504 //=======================================================
505 /// Reorder nodes in the order in which they are
506 /// encountered when stepping through the elements
507 //========================================================
508 void Mesh::reorder_nodes(const bool& use_old_ordering)
509 {
510 // Create storage for the reordered nodes
511 Vector<Node*> reordering;
512
513 // Get the reordered nodes (without altering the mesh's node vector)
514 get_node_reordering(reordering, use_old_ordering);
515
516 // Get the number of nodes in the mesh
517 unsigned n_node = nnode();
518
519 // Loop over all of the nodes
520 for (unsigned i = 0; i < n_node; i++)
521 {
522 // Replace the Mesh's i-th node pointer with the reordered node pointer
523 node_pt(i) = reordering[i];
524 }
525 } // End of reorder_nodes
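  // Usage sketch (illustrative addition): establish a standard node
  // ordering, e.g. before dumping restart data, so that the dump is
  // independent of the refinement history; "mesh_pt" is a hypothetical
  // placeholder.
  //
  //   mesh_pt->reorder_nodes();
  //
  //   // ...or obtain the ordering without modifying the mesh itself
  //   Vector<Node*> node_ordering;
  //   mesh_pt->get_node_reordering(node_ordering, true); // element-based order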
526
527 //=======================================================
528 /// Get a vector of the nodes in the order in which they are encountered
529 /// when stepping through the elements (similar to reorder_nodes() but
530 /// without changing the mesh's node vector).
531 //========================================================
532 void Mesh::get_node_reordering(Vector<Node*>& reordering,
533 const bool& use_old_ordering) const
534 {
535 // If the user wants to use the original order
536 if (use_old_ordering)
537 {
538 // Setup map to check if nodes have been done yet
539 std::map<Node*, bool> done;
540
541 // Loop over all nodes
542 unsigned nnod = nnode();
543
544 // Initialise the vector
545 reordering.assign(nnod, 0);
546
547 // Return immediately if there are no nodes: Note assumption:
548 // Either all the elements' nodes stored here or none. If only a subset
549 // is stored in the Node_pt vector we'll get a range checking error below
550 // (only if run with range checking, of course).
551 if (nnod == 0)
552 {
553 // Return immediately
554 return;
555 }
556
557 // Loop over the nodes in the mesh
558 for (unsigned j = 0; j < nnod; j++)
559 {
560 // Indicate whether or not the node has been swapped
561 done[node_pt(j)] = false;
562 }
563
564 // Initialise counter for number of nodes
565 unsigned long count = 0;
566
567 // Get the number of elements in the mesh
568 unsigned nel = nelement();
569
570 // Loop over all elements
571 for (unsigned e = 0; e < nel; e++)
572 {
573 // Make sure the e-th element is a FiniteElement (or derived) class
574 // object
575 FiniteElement* el_pt =
576 checked_dynamic_cast<FiniteElement*>(element_pt(e));
577
578 // Get the number of nodes in this element
579 unsigned nnod = el_pt->nnode();
580
581 // Loop over nodes in element
582 for (unsigned j = 0; j < nnod; j++)
583 {
584 // Get a pointer to the j-th node in the element
585 Node* nod_pt = el_pt->node_pt(j);
586
587 // Has node been done yet?
588 if (!done[nod_pt])
589 {
590 // Insert into node vector. NOTE: If you get a seg fault/range
591 // checking error here then you probably haven't added all the
592 // elements' nodes to the Node_pt vector -- this is most likely to
593 // arise in the case of meshes of face elements (though they usually
594 // don't store the nodes at all so if you have any problems here
595 // there's something unusual/not quite right in any case... For this
596 // reason we don't range check here by default (not even under
597 // paranoia) but force you turn on proper (costly) range checking to
598 // track this down...
599 reordering[count] = nod_pt;
600
601 // Indicate that the node has been done
602 done[nod_pt] = true;
603
604 // Increase counter
605 count++;
606 }
607 } // for (unsigned j=0;j<nnod;j++)
608 } // for (unsigned e=0;e<nel;e++)
609
610 // Sanity check
611 if (count != nnod)
612 {
613 // Create an error message
614 std::string error_message = "Trouble: Number of nodes hasn't stayed ";
615
616 // Finish off the message
617 error_message += "constant during reordering!\n";
618
619 // Throw an error
620 throw OomphLibError(
621 error_message, OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
622 }
623 }
624 else
625 {
626 // Copy node vector out
627 unsigned n_node = nnode();
628
629 // Resize the node ordering vector
630 reordering.resize(n_node);
631
632 // Loop over the nodes
633 for (unsigned i = 0; i < n_node; i++)
634 {
635 // Assign the i-th node pointer entry
636 reordering[i] = node_pt(i);
637 }
638
639 // Now sort the nodes lexicographically
640 std::sort(reordering.begin(),
641 reordering.end(),
642 CompareNodeCoordinates());
643 } // if (use_old_ordering)
644 } // End of get_node_reordering
645
646
647 //========================================================
648 /// Virtual Destructor to clean up all memory
649 //========================================================
650 Mesh::~Mesh()
651 {
652 // Free the nodes first
653 // Loop over the nodes in reverse order
654 unsigned long Node_pt_range = Node_pt.size();
655 for (unsigned long i = Node_pt_range; i > 0; i--)
656 {
657 delete Node_pt[i - 1];
658 Node_pt[i - 1] = 0;
659 }
660
661 // Free the elements
662 // Loop over the elements in reverse order
663 unsigned long Element_pt_range = Element_pt.size();
664 for (unsigned long i = Element_pt_range; i > 0; i--)
665 {
666 delete Element_pt[i - 1];
667 Element_pt[i - 1] = 0;
668 }
669
670 // Wipe the storage for all externally-based elements and delete halos
671 delete_all_external_storage();
672 }
673
674 //========================================================
675 /// Assign (global) equation numbers to the nodes
676 //========================================================
677 unsigned long Mesh::assign_global_eqn_numbers(Vector<double*>& Dof_pt)
678 {
679 // Find out the current number of equations
680 unsigned long equation_number = Dof_pt.size();
681
682 // Loop over the nodes and call their assignment functions
683 unsigned long nnod = Node_pt.size();
684
685 for (unsigned long i = 0; i < nnod; i++)
686 {
687 Node_pt[i]->assign_eqn_numbers(equation_number, Dof_pt);
688 }
689
690 // Loop over the elements and number their internals
691 unsigned long nel = Element_pt.size();
692 for (unsigned long i = 0; i < nel; i++)
693 {
694 Element_pt[i]->assign_internal_eqn_numbers(equation_number, Dof_pt);
695 }
696
697 // Return the total number of equations
698 return (equation_number);
699 }
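  // Illustrative note (not from the original source): this function is
  // normally invoked indirectly via Problem::assign_eqn_numbers(), which
  // hands its global Dof_pt vector to each (sub-)mesh in turn; a direct
  // call would look roughly like this, with "mesh_pt" and "dof_pt" as
  // hypothetical placeholders.
  //
  //   Vector<double*> dof_pt;
  //   unsigned long n_eqn = mesh_pt->assign_global_eqn_numbers(dof_pt);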
700
701 //========================================================
702 /// Function to describe the dofs of the Mesh. The ostream
703 /// specifies the output stream to which the description
704 /// is written; the string stores the currently
705 /// assembled output that is ultimately written to the
706 /// output stream by Data::describe_dofs(...); it is typically
707 /// built up incrementally as we descend through the
708 /// call hierarchy of this function when called from
709 /// Problem::describe_dofs(...)
710 //========================================================
711 void Mesh::describe_dofs(std::ostream& out,
712 const std::string& current_string) const
713 {
714 // Loop over the nodes and call their classification functions
715 unsigned long nnod = Node_pt.size();
716 for (unsigned long i = 0; i < nnod; i++)
717 {
718 std::stringstream conversion;
719 conversion << " of Node " << i << current_string;
720 std::string in(conversion.str());
721 Node_pt[i]->describe_dofs(out, in);
722 }
723
724 // Loop over the elements and classify.
725 unsigned long nel = Element_pt.size();
726 for (unsigned long i = 0; i < nel; i++)
727 {
728 std::stringstream conversion;
729 conversion << " in Element " << i << " [" << typeid(*Element_pt[i]).name()
730 << "] " << current_string;
731 std::string in(conversion.str());
732 Element_pt[i]->describe_dofs(out, in);
733 }
734 }
735
736 //========================================================
737 /// Function to describe the local dofs of the elements. The ostream
738 /// specifies the output stream to which the description
739 /// is written; the string stores the currently
740 /// assembled output that is ultimately written to the
741 /// output stream by Data::describe_dofs(...); it is typically
742 /// built up incrementally as we descend through the
743 /// call hierarchy of this function when called from
744 /// Problem::describe_dofs(...)
745 //========================================================
746 void Mesh::describe_local_dofs(std::ostream& out,
747 const std::string& current_string) const
748 {
749 // Now loop over the elements and describe local dofs
750 unsigned long nel = Element_pt.size();
751 for (unsigned long i = 0; i < nel; i++)
752 {
753 std::stringstream conversion;
754 conversion << " in Element" << i << " [" << typeid(*Element_pt[i]).name()
755 << "] " << current_string;
756 std::string in(conversion.str());
757 Element_pt[i]->describe_local_dofs(out, in);
758 }
759 }
760
761
762 //========================================================
763 /// Assign local equation numbers in all elements
764 //========================================================
765 void Mesh::assign_local_eqn_numbers(const bool& store_local_dof_pt)
766 {
767 // Now loop over the elements and assign local equation numbers
768 unsigned long Element_pt_range = Element_pt.size();
769 for (unsigned long i = 0; i < Element_pt_range; i++)
770 {
771 Element_pt[i]->assign_local_eqn_numbers(store_local_dof_pt);
772 }
773 }
774
775 //========================================================
776 /// Self-test: Check elements and nodes. Return 0 for OK
777 //========================================================
778 unsigned Mesh::self_test()
779 {
780 // Initialise
781 bool passed = true;
782
783 // Check the mesh for repeated nodes (issues its own error message)
784 if (0 != check_for_repeated_nodes()) passed = false;
785
786 // hierher -- re-enable once problem with Hermite elements has been
787 // resolved.
788 // // Check if there are any inverted elements
789 // bool mesh_has_inverted_elements=false;
790 // check_inverted_elements(mesh_has_inverted_elements);
791 // if (mesh_has_inverted_elements)
792 // {
793 // passed=false;
794 // oomph_info << "\n ERROR: Mesh has inverted elements\n"
795 // << " Run Mesh::check_inverted_elements(...) with"
796 // << " with output stream to find out which elements are"
797 // << " inverted.\n";
798 // }
799
800 // Loop over the elements, check for duplicates and do self test
801 std::set<GeneralisedElement*> element_set_pt;
802 unsigned long Element_pt_range = Element_pt.size();
803 for (unsigned long i = 0; i < Element_pt_range; i++)
804 {
805 if (Element_pt[i]->self_test() != 0)
806 {
807 passed = false;
808 oomph_info << "\n ERROR: Failed Element::self_test() for element i="
809 << i << std::endl;
810 }
811 // Add to set (which ignores duplicates):
812 element_set_pt.insert(Element_pt[i]);
813 }
814
815 // Check for duplicates:
816 if (element_set_pt.size() != Element_pt_range)
817 {
818 oomph_info << "ERROR: " << Element_pt_range - element_set_pt.size()
819 << " duplicate elements were encountered in mesh!"
820 << std::endl;
821 passed = false;
822 }
823
824
825 // Loop over the nodes, check for duplicates and do self test
826 std::set<Node*> node_set_pt;
827 unsigned long Node_pt_range = Node_pt.size();
828 for (unsigned long i = 0; i < Node_pt_range; i++)
829 {
830 if (Node_pt[i]->self_test() != 0)
831 {
832 passed = false;
833 oomph_info << "\n ERROR: Failed Node::self_test() for node i=" << i
834 << std::endl;
835 }
836 // Add to set (which ignores duplicates):
837 node_set_pt.insert(Node_pt[i]);
838 }
839
840 // Check for duplicates:
841 if (node_set_pt.size() != Node_pt_range)
842 {
843 oomph_info << "ERROR: " << Node_pt_range - node_set_pt.size()
844 << " duplicate nodes were encountered in mesh!" << std::endl;
845 passed = false;
846 }
847
848 // Return verdict
849 if (passed)
850 {
851 return 0;
852 }
853 else
854 {
855 return 1;
856 }
857 }
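  // Usage sketch (illustrative addition): self_test() returns 0 on
  // success, so it is typically used as
  //
  //   if (mesh_pt->self_test() != 0)
  //   {
  //     oomph_info << "Mesh failed self_test()!" << std::endl;
  //   }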
858
859
860 //========================================================
861 /// Check for inverted elements and report outcome
862 /// in boolean variable. This visits all elements at their
863 /// integration points and checks if the Jacobian of the
864 /// mapping between local and global coordinates is positive --
865 /// using the same test that would be carried out (but only in PARANOID
866 /// mode) during the assembly of the elements' Jacobian matrices.
867 /// Inverted elements are output in inverted_element_file (if the
868 /// stream is open).
869 //========================================================
870 void Mesh::check_inverted_elements(bool& mesh_has_inverted_elements,
871 std::ofstream& inverted_element_file)
872 {
873 // Initialise flag
874 mesh_has_inverted_elements = false;
875
876 // Suppress output while checking for inverted elements
877 bool backup =
878 FiniteElement::Suppress_output_while_checking_for_inverted_elements;
879 FiniteElement::Suppress_output_while_checking_for_inverted_elements = true;
880
881 // Loop over all elements
882 unsigned nelem = nelement();
883 for (unsigned e = 0; e < nelem; e++)
884 {
885 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(element_pt(e));
886
887 // Only check for finite elements
888 if (el_pt != 0)
889 {
890 // Find out number of nodes and local coordinates in the element
891 unsigned n_node = el_pt->nnode();
892 unsigned n_dim = el_pt->dim();
893 unsigned ndim_node = el_pt->nodal_dimension();
894
895 // Can't check the Jacobian for elements in which the nodal and
896 // elemental dimensions don't match
897 if (n_dim == ndim_node)
898 {
899 // Set up memory for the shape and test function and local coord
900 Shape psi(n_node);
901 DShape dpsidx(n_node, n_dim);
902 Vector<double> s(n_dim);
903
904 // Initialise element-level test
905 bool is_inverted = false;
906
907 unsigned n_intpt = el_pt->integral_pt()->nweight();
908 for (unsigned ipt = 0; ipt < n_intpt; ipt++)
909 {
910 // Local coordinates
911 for (unsigned i = 0; i < n_dim; i++)
912 {
913 s[i] = el_pt->integral_pt()->knot(ipt, i);
914 }
915
916 double J = 0;
917 // Dummy assignment to keep gcc from complaining about
918 // "set but unused".
919 J += 0.0;
920 try
921 {
922 // Call the derivatives of the shape functions and Jacobian
923 J = el_pt->dshape_eulerian(s, psi, dpsidx);
924
925 // If code is compiled without PARANOID setting, the
926 // above call will simply return the negative Jacobian
927 // without failing, so we need to check manually
928#ifndef PARANOID
929 try
930 {
931 el_pt->check_jacobian(J);
932 }
933 catch (const OomphLibQuietException& error)
934 {
935 is_inverted = true;
936 }
937#endif
938 }
939 catch (const OomphLibQuietException& error)
940 {
941 is_inverted = true;
942 }
943 }
944 if (is_inverted)
945 {
946 mesh_has_inverted_elements = true;
947 if (inverted_element_file.is_open())
948 {
949 el_pt->output(inverted_element_file);
950 }
951 }
952 }
953 }
954 }
955 // Reset
956 FiniteElement::Suppress_output_while_checking_for_inverted_elements =
957 backup;
958 }
959
960
961 //========================================================
962 /// Nodes that have been marked as obsolete are removed
963 /// from the mesh and its boundaries. Returns a vector
964 /// of pointers to deleted nodes.
965 //========================================================
966 Vector<Node*> Mesh::prune_dead_nodes()
967 {
968 // Only copy the 'live' nodes across to new mesh
969 //----------------------------------------------
970
971 // New Vector of pointers to nodes
972 Vector<Node*> new_node_pt;
973 Vector<Node*> deleted_node_pt;
974
975 // Loop over all nodes in mesh
976 unsigned long n_node = nnode();
977 for (unsigned long n = 0; n < n_node; n++)
978 {
979 // If the node still exists: Copy across
980 if (!(Node_pt[n]->is_obsolete()))
981 {
982 new_node_pt.push_back(Node_pt[n]);
983 }
984 // Otherwise the Node is gone:
985 // Delete it for good if it does not lie on a boundary
986 // (if it lives on a boundary we have to remove it from
987 // the boundary lookup schemes below)
988 else
989 {
990 if (!(Node_pt[n]->is_on_boundary()))
991 {
992 deleted_node_pt.push_back(Node_pt[n]);
993 delete Node_pt[n];
994 Node_pt[n] = 0;
995 }
996 }
997 }
998
999 // Now update old vector by setting it equal to the new vector
1000 Node_pt = new_node_pt;
1001
1002
1003 // Boundaries
1004 //-----------
1005
1006 // Only copy the 'live' nodes into new boundary node arrays
1007 //---------------------------------------------------------
1008 // Loop over the boundaries
1009 unsigned num_bound = nboundary();
1010 for (unsigned ibound = 0; ibound < num_bound; ibound++)
1011 {
1012 // New Vector of pointers to existent boundary nodes
1013 Vector<Node*> new_boundary_node_pt;
1014
1015 // Loop over the boundary nodes
1016 unsigned long Nboundary_node = Boundary_node_pt[ibound].size();
1017
1018 // Reserve contiguous memory for new vector of pointers
1019 // Must be equal in size to the number of nodes or less
1020 new_boundary_node_pt.reserve(Nboundary_node);
1021
1022 for (unsigned long n = 0; n < Nboundary_node; n++)
1023 {
1024 // If node still exists: Copy across
1025 if (!(Boundary_node_pt[ibound][n]->is_obsolete()))
1026 {
1027 new_boundary_node_pt.push_back(Boundary_node_pt[ibound][n]);
1028 }
1029 // Otherwise Node is gone: Delete it for good
1030 else
1031 {
1032 // The node may lie on multiple boundaries, so remove the node
1033 // from the current boundary
1034 Boundary_node_pt[ibound][n]->remove_from_boundary(ibound);
1035
1036 // Now if the node is no longer on any boundaries, delete it
1037 if (!Boundary_node_pt[ibound][n]->is_on_boundary())
1038 {
1039 deleted_node_pt.push_back(
1040 dynamic_cast<Node*>(Boundary_node_pt[ibound][n]));
1041
1042 delete Boundary_node_pt[ibound][n];
1043 }
1044 }
1045 }
1046
1047 // Update the old vector by setting it equal to the new vector
1048 Boundary_node_pt[ibound] = new_boundary_node_pt;
1049
1050 } // End of loop over boundaries
1051
1052 // Tell us who you deleted
1053 return deleted_node_pt;
1054 }
1055
1056
1057 //========================================================
1058 /// Output function for the mesh boundaries
1059 ///
1060 /// Loop over all boundaries and dump out the coordinates
1061 /// of the points on the boundary (in individual tecplot
1062 /// zones)
1063 //========================================================
1064 void Mesh::output_boundaries(std::ostream& outfile)
1065 {
1066 // Loop over the boundaries
1067 unsigned num_bound = nboundary();
1068 for (unsigned long ibound = 0; ibound < num_bound; ibound++)
1069 {
1070 unsigned nnod = Boundary_node_pt[ibound].size();
1071 if (nnod > 0)
1072 {
1073 outfile << "ZONE T=\"boundary" << ibound << "\"\n";
1074
1075 for (unsigned inod = 0; inod < nnod; inod++)
1076 {
1077 Boundary_node_pt[ibound][inod]->output(outfile);
1078 }
1079 }
1080 }
1081 }
1082
1083
1084 //===================================================================
1085 /// Dump function for the mesh class.
1086 /// Loop over all nodes and elements and dump them
1087 //===================================================================
1088 void Mesh::dump(std::ofstream& dump_file, const bool& use_old_ordering) const
1089 {
1090 // Get a reordering of the nodes so that the dump file is in a standard
1091 // ordering regardless of the sequence of mesh refinements etc.
1092 Vector<Node*> reordering;
1093 this->get_node_reordering(reordering, use_old_ordering);
1094
1095 // Find number of nodes
1096 unsigned long Node_pt_range = this->nnode();
1097
1098 // Doc # of nodes
1099 dump_file << Node_pt_range << " # number of nodes " << std::endl;
1100
1101 // Loop over all the nodes and dump their data
1102 for (unsigned nd = 0; nd < Node_pt_range; nd++)
1103 {
1104 reordering[nd]->dump(dump_file);
1105 }
1106
1107 // Loop over elements and deal with internal data
1108 unsigned n_element = this->nelement();
1109 for (unsigned e = 0; e < n_element; e++)
1110 {
1111 GeneralisedElement* el_pt = this->element_pt(e);
1112 unsigned n_internal = el_pt->ninternal_data();
1113 if (n_internal > 0)
1114 {
1116 << " # number of internal Data items in element " << e
1117 << std::endl;
1118 for (unsigned i = 0; i < n_internal; i++)
1119 {
1120 el_pt->internal_data_pt(i)->dump(dump_file);
1121 }
1122 }
1123 }
1124 }
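  // Usage sketch (illustrative addition): writing restart data for a mesh;
  // "mesh_pt" is a hypothetical placeholder.
  //
  //   std::ofstream dump_file("mesh_restart.dat");
  //   mesh_pt->dump(dump_file);
  //   dump_file.close();
  //
  // The matching Mesh::read(...) below re-loads the data, provided the
  // mesh has been rebuilt/refined to the same state beforehand.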
1125
1126
1127 //=======================================================
1128 /// Read solution from restart file
1129 //=======================================================
1130 void Mesh::read(std::ifstream& restart_file)
1131 {
1132 std::string input_string;
1133
1134 // Reorder the nodes within the mesh's node vector
1135 // to establish a standard ordering regardless of the sequence
1136 // of mesh refinements etc
1137 this->reorder_nodes();
1138
1139 // Read nodes
1140
1141 // Find number of nodes
1142 unsigned long n_node = this->nnode();
1143
1144 // Read line up to termination sign
1145 getline(restart_file, input_string, '#');
1146
1147 // Ignore rest of line
1148 restart_file.ignore(80, '\n');
1149
1150 // Check # of nodes:
1151 unsigned long check_n_node = atoi(input_string.c_str());
1152 if (check_n_node != n_node)
1153 {
1154 std::ostringstream error_stream;
1155 error_stream << "The number of nodes allocated " << n_node
1156 << " is not the same as specified in the restart file "
1157 << check_n_node << std::endl;
1158
1159 throw OomphLibError(
1160 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1161 }
1162
1163 // Loop over the nodes
1164 for (unsigned long n = 0; n < n_node; n++)
1165 {
1166 /// Try to cast to elastic node
1167 SolidNode* el_node_pt = dynamic_cast<SolidNode*>(this->node_pt(n));
1168 if (el_node_pt != 0)
1169 {
1170 el_node_pt->read(restart_file);
1171 }
1172 else
1173 {
1174 this->node_pt(n)->read(restart_file);
1175 }
1176 }
1177
1178 // Read internal data of elements:
1179 //--------------------------------
1180 // Loop over elements and deal with internal data
1181 unsigned n_element = this->nelement();
1182 for (unsigned e = 0; e < n_element; e++)
1183 {
1184 GeneralisedElement* el_pt = this->element_pt(e);
1185 unsigned n_internal = el_pt->ninternal_data();
1186 if (n_internal > 0)
1187 {
1188 // Read line up to termination sign
1189 getline(restart_file, input_string, '#');
1190
1191 // Ignore rest of line
1192 restart_file.ignore(80, '\n');
1193
1194 // Check # of internals :
1195 unsigned long check_n_internal = atoi(input_string.c_str());
1196 if (check_n_internal != n_internal)
1197 {
1198 std::ostringstream error_stream;
1199 error_stream << "The number of internal data " << n_internal
1200 << " is not the same as specified in the restart file "
1201 << check_n_internal << std::endl;
1202
1203 throw OomphLibError(error_stream.str(),
1204 OOMPH_CURRENT_FUNCTION,
1205 OOMPH_EXCEPTION_LOCATION);
1206 }
1207
1208 for (unsigned i = 0; i < n_internal; i++)
1209 {
1210 el_pt->internal_data_pt(i)->read(restart_file);
1211 }
1212 }
1213 }
1214 }
1215
1216
1217 //========================================================
1218 /// Output in paraview format into specified file.
1219 ///
1220 /// Breaks up each element into sub-elements for plotting
1221 /// purposes. We assume that all elements are of the same
1222 /// type (fct will break (in paranoid mode) if paraview
1223 /// output fcts of the elements are inconsistent).
1224 //========================================================
1225 void Mesh::output_paraview(std::ofstream& file_out,
1226 const unsigned& nplot) const
1227 {
1228 // Change the scientific format so that E is used rather than e
1229 file_out.setf(std::ios_base::uppercase);
1230
1231 // Decide how many elements there are to be plotted
1232 unsigned long number_of_elements = this->Element_pt.size();
1233
1234 // Cast to finite element and return if cast fails.
1235 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1236
1237#ifdef PARANOID
1238 if (fe_pt == 0)
1239 {
1240 throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1241 OOMPH_CURRENT_FUNCTION,
1242 OOMPH_EXCEPTION_LOCATION);
1243 }
1244#endif
1245
1246
1247#ifdef PARANOID
1248 // Check if all elements have the same number of degrees of freedom,
1249 // if they don't, paraview will break
1250 unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1251 for (unsigned i = 1; i < number_of_elements; i++)
1252 {
1253 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1254 unsigned el_i_ndof = fe_pt->nscalar_paraview();
1255 if (el_zero_ndof != el_i_ndof)
1256 {
1257 std::stringstream error_stream;
1259 << "Element " << i << " has different number of degrees of freedom\n"
1260 << "than from previous elements, Paraview cannot handle this.\n"
1261 << "We suggest that the problem is broken up into submeshes instead."
1262 << std::endl;
1263 throw OomphLibError(
1264 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1265 }
1266 }
1267#endif
1268
1269 // Make variables to hold the number of nodes and elements
1270 unsigned long number_of_nodes = 0;
1271 unsigned long total_number_of_elements = 0;
1272
1273 // Loop over all the elements to find total number of plot points
1274 for (unsigned i = 0; i < number_of_elements; i++)
1275 {
1276 // Cast to FiniteElement and (in paranoid mode) check
1277 // if cast has failed.
1278 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1279
1280#ifdef PARANOID
1281 if (fe_pt == 0)
1282 {
1283 std::stringstream error_stream;
1284 error_stream << "Recast for element " << i << " failed" << std::endl;
1285 throw OomphLibError(
1286 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1287 }
1288#endif
1289
1290 number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1291 total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1292 }
1293
1294
1295 // File Declaration
1296 //------------------
1297
1298 // Insert the necessary lines plus header of file, and
1299 // number of nodes and elements
1300 file_out << "<?xml version=\"1.0\"?>\n"
1301 << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1302 << "byte_order=\"LittleEndian\">\n"
1303 << "<UnstructuredGrid>\n"
1304 << "<Piece NumberOfPoints=\"" << number_of_nodes
1305 << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1306
1307
1308 // Point Data
1309 //-----------
1310
1311 // Check the number of degrees of freedom
1312 unsigned ndof = fe_pt->nscalar_paraview();
1313
1314 // Point data is going in here
1315 file_out << "<PointData ";
1316
1317 // Insert just the first scalar name, since paraview reads everything
1318 // else after that as being of the same type. Get information from
1319 // first element.
1320 file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1321
1322 // Loop over i scalar fields and j number of elements
1323 for (unsigned i = 0; i < ndof; i++)
1324 {
1325 file_out << "<DataArray type=\"Float32\" "
1326 << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1327 << "format=\"ascii\""
1328 << ">\n";
1329
1330 for (unsigned j = 0; j < number_of_elements; j++)
1331 {
1332 // Cast to FiniteElement and (in paranoid mode) check
1333 // if cast has failed.
1334 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1335
1336#ifdef PARANOID
1337 if (fe_pt == 0)
1338 {
1339 std::stringstream error_stream;
1340 error_stream << "Recast for element " << j << " failed" << std::endl;
1341 throw OomphLibError(error_stream.str(),
1342 OOMPH_CURRENT_FUNCTION,
1343 OOMPH_EXCEPTION_LOCATION);
1344 }
1345#endif
1346
1347 fe_pt->scalar_value_paraview(file_out, i, nplot);
1348 }
1349
1350 // Close of the DataArray
1351 file_out << "</DataArray>\n";
1352 }
1353
1354 // Close off the PointData set
1355 file_out << "</PointData>\n";
1356
1357
1358 // Geometric Points
1359 //------------------
1360
1361 file_out << "<Points>\n"
1362 << "<DataArray type=\"Float32\""
1363 << " NumberOfComponents=\""
1364 // This always has to be 3 for an unstructured grid
1365 << 3 << "\" "
1366 << "format=\"ascii\">\n";
1367
1368 // Loop over all the elements to print their plot points
1369 for (unsigned i = 0; i < number_of_elements; i++)
1370 {
1371 // Cast to FiniteElement and (in paranoid mode) check
1372 // if cast has failed.
1373 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1374
1375#ifdef PARANOID
1376 if (fe_pt == 0)
1377 {
1378 std::stringstream error_stream;
1379 error_stream << "Recast for element " << i << " failed" << std::endl;
1380 throw OomphLibError(
1381 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1382 }
1383#endif
1384
1385 fe_pt->output_paraview(file_out, nplot);
1386 }
1387
1388 file_out << "</DataArray>\n"
1389 << "</Points>\n";
1390
1391
1392 // Cells
1393 //-------
1394
1395 file_out
1396 << "<Cells>\n"
1397 << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1398
1399 // Make counter for keeping track of all the local elements,
1400 // because Paraview requires global coordinates
1401 unsigned counter = 0;
1402
1403 // Write connectivity with the local elements
1404 for (unsigned i = 0; i < number_of_elements; i++)
1405 {
1406 // Cast to FiniteElement and (in paranoid mode) check
1407 // if cast has failed.
1408 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1409
1410#ifdef PARANOID
1411 if (fe_pt == 0)
1412 {
1413 std::stringstream error_stream;
1414 error_stream << "Recast for element " << i << " failed" << std::endl;
1415 throw OomphLibError(
1416 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1417 }
1418#endif
1419 fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1420 }
1421
1422 file_out << "</DataArray>\n"
1423 << "<DataArray type=\"Int32\" "
1424 << "Name=\"offsets\" format=\"ascii\">\n";
1425
1426 // Make variable that holds the current offset number
1427 unsigned offset_sum = 0;
1428
1429 // Write the offset for the specific elements
1430 for (unsigned i = 0; i < number_of_elements; i++)
1431 {
1432 // Cast to FiniteElement and (in paranoid mode) check
1433 // if cast has failed.
1434 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1435
1436#ifdef PARANOID
1437 if (fe_pt == 0)
1438 {
1439 std::stringstream error_stream;
1440 error_stream << "Recast for element " << i << " failed" << std::endl;
1441 throw OomphLibError(
1442 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1443 }
1444#endif
1445 fe_pt->write_paraview_offsets(file_out, nplot, offset_sum);
1446 }
1447
1448 file_out << "</DataArray>\n"
1449 << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1450
1451 // Loop over all elements to get the type that they have
1452 for (unsigned i = 0; i < number_of_elements; i++)
1453 {
1454 // Cast to FiniteElement and (in paranoid mode) check
1455 // if cast has failed.
1456 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1457
1458#ifdef PARANOID
1459 if (fe_pt == 0)
1460 {
1461 std::stringstream error_stream;
1462 error_stream << "Recast for element " << i << " failed" << std::endl;
1463 throw OomphLibError(
1464 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1465 }
1466#endif
1467
1468 fe_pt->write_paraview_type(file_out, nplot);
1469 }
1470
1471 file_out << "</DataArray>\n"
1472 << "</Cells>\n";
1473
1474
1475 // File Closure
1476 //-------------
1477 file_out << "</Piece>\n"
1478 << "</UnstructuredGrid>\n"
1479 << "</VTKFile>";
1480 }
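  // Usage sketch (illustrative addition): writing a *.vtu file that can be
  // opened directly in Paraview, here with 3 plot points per coordinate
  // direction; "mesh_pt" is a hypothetical placeholder.
  //
  //   std::ofstream vtu_file("soln0.vtu");
  //   mesh_pt->output_paraview(vtu_file, 3);
  //   vtu_file.close();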
1481
1482
1483 //========================================================
1484 /// Output in paraview format into specified file.
1485 ///
1486 /// Breaks up each element into sub-elements for plotting
1487 /// purposes. We assume that all elements are of the same
1488 /// type (fct will break (in paranoid mode) if paraview
1489 /// output fcts of the elements are inconsistent).
1490 //========================================================
1491 void Mesh::output_fct_paraview(
1492 std::ofstream& file_out,
1493 const unsigned& nplot,
1494 FiniteElement::SteadyExactSolutionFctPt exact_soln_pt) const
1495 {
1496 // Change the scientific format so that E is used rather than e
1497 file_out.setf(std::ios_base::uppercase);
1498
1499 // Decide how many elements there are to be plotted
1500 unsigned long number_of_elements = this->Element_pt.size();
1501
1502 // Cast to finite element and return if cast fails.
1503 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1504
1505#ifdef PARANOID
1506 if (fe_pt == 0)
1507 {
1508 throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1509 OOMPH_CURRENT_FUNCTION,
1510 OOMPH_EXCEPTION_LOCATION);
1511 }
1512#endif
1513
1514
1515#ifdef PARANOID
1516 // Check if all elements have the same number of degrees of freedom,
1517 // if they don't, paraview will break
1518 unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1519 for (unsigned i = 1; i < number_of_elements; i++)
1520 {
1521 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1522 unsigned el_i_ndof = fe_pt->nscalar_paraview();
1523 if (el_zero_ndof != el_i_ndof)
1524 {
1525 std::stringstream error_stream;
1527 << "Element " << i << " has different number of degrees of freedom\n"
1528 << "than from previous elements, Paraview cannot handle this.\n"
1529 << "We suggest that the problem is broken up into submeshes instead."
1530 << std::endl;
1531 throw OomphLibError(
1532 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1533 }
1534 }
1535#endif
1536
1537 // Make variables to hold the number of nodes and elements
1538 unsigned long number_of_nodes = 0;
1539 unsigned long total_number_of_elements = 0;
1540
1541 // Loop over all the elements to find total number of plot points
1542 for (unsigned i = 0; i < number_of_elements; i++)
1543 {
1544 // Cast to FiniteElement and (in paranoid mode) check
1545 // if cast has failed.
1546 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1547
1548#ifdef PARANOID
1549 if (fe_pt == 0)
1550 {
1551 std::stringstream error_stream;
1552 error_stream << "Recast for element " << i << " failed" << std::endl;
1553 throw OomphLibError(
1554 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1555 }
1556#endif
1557
1558 number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1559 total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1560 }
1561
1562
1563 // File Declaration
1564 //------------------
1565
1566 // Insert the necessary lines plus header of file, and
1567 // number of nodes and elements
1568 file_out << "<?xml version=\"1.0\"?>\n"
1569 << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1570 << "byte_order=\"LittleEndian\">\n"
1571 << "<UnstructuredGrid>\n"
1572 << "<Piece NumberOfPoints=\"" << number_of_nodes
1573 << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1574
1575
1576 // Point Data
1577 //-----------
1578
1579 // Check the number of degrees of freedom
1580 unsigned ndof = fe_pt->nscalar_paraview();
1581
1582 // Point data is going in here
1583 file_out << "<PointData ";
1584
1585 // Insert just the first scalar name, since paraview reads everything
1586 // else after that as being of the same type. Get information from
1587 // first element.
1588 file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1589
1590 // Loop over i scalar fields and j number of elements
1591 for (unsigned i = 0; i < ndof; i++)
1592 {
1593 file_out << "<DataArray type=\"Float32\" "
1594 << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1595 << "format=\"ascii\""
1596 << ">\n";
1597
1598 for (unsigned j = 0; j < number_of_elements; j++)
1599 {
1600 // Cast to FiniteElement and (in paranoid mode) check
1601 // if cast has failed.
1602 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1603
1604#ifdef PARANOID
1605 if (fe_pt == 0)
1606 {
1607 std::stringstream error_stream;
1608 error_stream << "Recast for element " << j << " failed" << std::endl;
1609 throw OomphLibError(error_stream.str(),
1610 OOMPH_CURRENT_FUNCTION,
1611 OOMPH_EXCEPTION_LOCATION);
1612 }
1613#endif
1614
1615 fe_pt->scalar_value_fct_paraview(file_out, i, nplot, exact_soln_pt);
1616 }
1617
1618 // Close of the DataArray
1619 file_out << "</DataArray>\n";
1620 }
1621
1622 // Close off the PointData set
1623 file_out << "</PointData>\n";
1624
1625
1626 // Geometric Points
1627 //------------------
1628
1629 file_out << "<Points>\n"
1630 << "<DataArray type=\"Float32\""
1631 << " NumberOfComponents=\""
1632 // This always has to be 3 for an unstructured grid
1633 << 3 << "\" "
1634 << "format=\"ascii\">\n";
1635
1636 // Loop over all the elements to print their plot points
1637 for (unsigned i = 0; i < number_of_elements; i++)
1638 {
1639 // Cast to FiniteElement and (in paranoid mode) check
1640 // if cast has failed.
1641 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1642
1643#ifdef PARANOID
1644 if (fe_pt == 0)
1645 {
1646 std::stringstream error_stream;
1647 error_stream << "Recast for element " << i << " failed" << std::endl;
1648 throw OomphLibError(
1649 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1650 }
1651#endif
1652
1653 fe_pt->output_paraview(file_out, nplot);
1654 }
1655
1656 file_out << "</DataArray>\n"
1657 << "</Points>\n";
1658
1659
1660 // Cells
1661 //-------
1662
1663 file_out
1664 << "<Cells>\n"
1665 << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1666
1667 // Make counter for keeping track of all the local elements,
1668 // because Paraview requires global coordinates
1669 unsigned counter = 0;
1670
1671 // Write connectivity with the local elements
1672 for (unsigned i = 0; i < number_of_elements; i++)
1673 {
1674 // Cast to FiniteElement and (in paranoid mode) check
1675 // if cast has failed.
1676 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1677
1678#ifdef PARANOID
1679 if (fe_pt == 0)
1680 {
1681 std::stringstream error_stream;
1682 error_stream << "Recast for element " << i << " failed" << std::endl;
1683 throw OomphLibError(
1684 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1685 }
1686#endif
1687 fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1688 }
1689
1690 file_out << "</DataArray>\n"
1691 << "<DataArray type=\"Int32\" "
1692 << "Name=\"offsets\" format=\"ascii\">\n";
1693
1694 // Make variable that holds the current offset number
1695 unsigned offset_sum = 0;
1696
1697 // Write the offset for the specific elements
1698 for (unsigned i = 0; i < number_of_elements; i++)
1699 {
1700 // Cast to FiniteElement and (in paranoid mode) check
1701 // if cast has failed.
1702 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1703
1704#ifdef PARANOID
1705 if (fe_pt == 0)
1706 {
1707 std::stringstream error_stream;
1708 error_stream << "Recast for element " << i << " failed" << std::endl;
1709 throw OomphLibError(
1710 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1711 }
1712#endif
1713 fe_pt->write_paraview_offsets(file_out, nplot, offset_sum);
1714 }
1715
1716 file_out << "</DataArray>\n"
1717 << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1718
1719 // Loop over all elements to get the type that they have
1720 for (unsigned i = 0; i < number_of_elements; i++)
1721 {
1722 // Cast to FiniteElement and (in paranoid mode) check
1723 // if cast has failed.
1724 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1725
1726#ifdef PARANOID
1727 if (fe_pt == 0)
1728 {
1729 std::stringstream error_stream;
1730 error_stream << "Recast for element " << i << " failed" << std::endl;
1731 throw OomphLibError(
1732 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1733 }
1734#endif
1735
1736 fe_pt->write_paraview_type(file_out, nplot);
1737 }
1738
1739 file_out << "</DataArray>\n"
1740 << "</Cells>\n";
1741
1742
1743 // File Closure
1744 //-------------
1745 file_out << "</Piece>\n"
1746 << "</UnstructuredGrid>\n"
1747 << "</VTKFile>";
1748 }
1749
1750
1751 //========================================================
1752 /// Output in paraview format into specified file.
1753 ///
1754 /// Breaks up each element into sub-elements for plotting
1755 /// purposes. We assume that all elements are of the same
1756 /// type (fct will break (in paranoid mode) if paraview
1757 /// output fcts of the elements are inconsistent).
1758 //========================================================
1759 void Mesh::output_fct_paraview(
1760 std::ofstream& file_out,
1761 const unsigned& nplot,
1762 const double& time,
1763 FiniteElement::UnsteadyExactSolutionFctPt exact_soln_pt) const
1764 {
1765 // Change the scientific format so that E is used rather than e
1766 file_out.setf(std::ios_base::uppercase);
1767
1768 // Decide how many elements there are to be plotted
1769 unsigned long number_of_elements = this->Element_pt.size();
1770
1771 // Cast to finite element and return if cast fails.
1772 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(0));
1773
1774#ifdef PARANOID
1775 if (fe_pt == 0)
1776 {
1777 throw OomphLibError("Recast for FiniteElement failed for element 0!\n",
1778 OOMPH_CURRENT_FUNCTION,
1779 OOMPH_EXCEPTION_LOCATION);
1780 }
1781#endif
1782
1783
1784#ifdef PARANOID
1785 // Check if all elements have the same number of degrees of freedom,
1786 // if they don't, paraview will break
1787 unsigned el_zero_ndof = fe_pt->nscalar_paraview();
1788 for (unsigned i = 1; i < number_of_elements; i++)
1789 {
1790 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1791 unsigned el_i_ndof = fe_pt->nscalar_paraview();
1792 if (el_zero_ndof != el_i_ndof)
1793 {
1794 std::stringstream error_stream;
1796 << "Element " << i << " has different number of degrees of freedom\n"
1797 << "than from previous elements, Paraview cannot handle this.\n"
1798 << "We suggest that the problem is broken up into submeshes instead."
1799 << std::endl;
1800 throw OomphLibError(
1801 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1802 }
1803 }
1804#endif
1805
1806 // Make variables to hold the number of nodes and elements
1807 unsigned long number_of_nodes = 0;
1808 unsigned long total_number_of_elements = 0;
1809
1810 // Loop over all the elements to find total number of plot points
1811 for (unsigned i = 0; i < number_of_elements; i++)
1812 {
1813 // Cast to FiniteElement and (in paranoid mode) check
1814 // if cast has failed.
1815 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1816
1817#ifdef PARANOID
1818 if (fe_pt == 0)
1819 {
1820 std::stringstream error_stream;
1821 error_stream << "Recast for element " << i << " failed" << std::endl;
1822 throw OomphLibError(
1823 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1824 }
1825#endif
1826
1827 number_of_nodes += fe_pt->nplot_points_paraview(nplot);
1828 total_number_of_elements += fe_pt->nsub_elements_paraview(nplot);
1829 }
1830
1831
1832 // File Declaration
1833 //------------------
1834
1835 // Insert the necessary lines plus header of file, and
1836 // number of nodes and elements
1837 file_out << "<?xml version=\"1.0\"?>\n"
1838 << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\" "
1839 << "byte_order=\"LittleEndian\">\n"
1840 << "<UnstructuredGrid>\n"
1841 << "<Piece NumberOfPoints=\"" << number_of_nodes
1842 << "\" NumberOfCells=\"" << total_number_of_elements << "\">\n";
1843
1844
1845 // Point Data
1846 //-----------
1847
1848 // Check the number of degrees of freedom
1849 unsigned ndof = fe_pt->nscalar_paraview();
1850
1851 // Point data is going in here
1852 file_out << "<PointData ";
1853
1854 // Insert just the first scalar name, since paraview reads everything
1855 // else after that as being of the same type. Get information from
1856 // first element.
1857 file_out << "Scalars=\"" << fe_pt->scalar_name_paraview(0) << "\">\n";
1858
1859 // Loop over i scalar fields and j number of elements
1860 for (unsigned i = 0; i < ndof; i++)
1861 {
1862 file_out << "<DataArray type=\"Float32\" "
1863 << "Name=\"" << fe_pt->scalar_name_paraview(i) << "\" "
1864 << "format=\"ascii\""
1865 << ">\n";
1866
1867 for (unsigned j = 0; j < number_of_elements; j++)
1868 {
1869 // Cast to FiniteElement and (in paranoid mode) check
1870 // if cast has failed.
1871 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(j));
1872
1873#ifdef PARANOID
1874 if (fe_pt == 0)
1875 {
1876 std::stringstream error_stream;
1877 error_stream << "Recast for element " << j << " failed" << std::endl;
1878 throw OomphLibError(error_stream.str(),
1879 OOMPH_CURRENT_FUNCTION,
1880 OOMPH_EXCEPTION_LOCATION);
1881 }
1882#endif
1883
1884 fe_pt->scalar_value_fct_paraview(
1885 file_out, i, nplot, time, exact_soln_pt);
1886 }
1887
1888 // Close of the DataArray
1889 file_out << "</DataArray>\n";
1890 }
1891
1892 // Close off the PointData set
1893 file_out << "</PointData>\n";
1894
1895
1896 // Geometric Points
1897 //------------------
1898
1899 file_out << "<Points>\n"
1900 << "<DataArray type=\"Float32\""
1901 << " NumberOfComponents=\""
1902 // This always has to be 3 for an unstructured grid
1903 << 3 << "\" "
1904 << "format=\"ascii\">\n";
1905
1906 // Loop over all the elements to print their plot points
1907 for (unsigned i = 0; i < number_of_elements; i++)
1908 {
1909 // Cast to FiniteElement and (in paranoid mode) check
1910 // if cast has failed.
1911 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1912
1913#ifdef PARANOID
1914 if (fe_pt == 0)
1915 {
1916 std::stringstream error_stream;
1917 error_stream << "Recast for element " << i << " failed" << std::endl;
1918 throw OomphLibError(
1919 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1920 }
1921#endif
1922
1923 fe_pt->output_paraview(file_out, nplot);
1924 }
1925
1926 file_out << "</DataArray>\n"
1927 << "</Points>\n";
1928
1929
1930 // Cells
1931 //-------
1932
1933 file_out
1934 << "<Cells>\n"
1935 << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n";
1936
1937 // Make counter for keeping track of all the local elements,
1938 // because Paraview requires global coordinates
1939 unsigned counter = 0;
1940
1941 // Write connectivity with the local elements
1942 for (unsigned i = 0; i < number_of_elements; i++)
1943 {
1944 // Cast to FiniteElement and (in paranoid mode) check
1945 // if cast has failed.
1946 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1947
1948#ifdef PARANOID
1949 if (fe_pt == 0)
1950 {
1951 std::stringstream error_stream;
1952 error_stream << "Recast for element " << i << " failed" << std::endl;
1953 throw OomphLibError(
1954 error_stream.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
1955 }
1956#endif
1957 fe_pt->write_paraview_output_offset_information(file_out, nplot, counter);
1958 }
1959
1960 file_out << "</DataArray>\n"
1961 << "<DataArray type=\"Int32\" "
1962 << "Name=\"offsets\" format=\"ascii\">\n";
1963
1964 // Make variable that holds the current offset number
1965 unsigned offset_sum = 0;
1966
1967 // Write the offset for the specific elements
1968 for (unsigned i = 0; i < number_of_elements; i++)
1969 {
1970 // Cast to FiniteElement and (in paranoid mode) check
1971 // if cast has failed.
1972 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1973
1974#ifdef PARANOID
1975 if (fe_pt == 0)
1976 {
1977 std::stringstream error_stream;
1978 error_stream << "Recast for element " << i << " failed" << std::endl;
1979 throw OomphLibError(
1981 }
1982#endif
1984 }
1985
1986 file_out << "</DataArray>\n"
1987 << "<DataArray type=\"UInt8\" Name=\"types\">\n";
1988
1989 // Loop over all elements to get the type that they have
1990 for (unsigned i = 0; i < number_of_elements; i++)
1991 {
1992 // Cast to FiniteElement and (in paranoid mode) check
1993 // if cast has failed.
1994 FiniteElement* fe_pt = dynamic_cast<FiniteElement*>(element_pt(i));
1995
1996#ifdef PARANOID
1997 if (fe_pt == 0)
1998 {
1999 std::stringstream error_stream;
2000 error_stream << "Recast for element " << i << " failed" << std::endl;
2001 throw OomphLibError(
2003 }
2004#endif
2005
2007 }
2008
2009 file_out << "</DataArray>\n"
2010 << "</Cells>\n";
2011
2012
2013 // File Closure
2014 //-------------
2015 file_out << "</Piece>\n"
2016 << "</UnstructuredGrid>\n"
2017 << "</VTKFile>";
2018 }
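
  //======================================================================
  // Illustrative usage sketch (not part of the library source): driver
  // code typically dumps a mesh in Paraview's .vtu format through the
  // output_paraview(...) family of functions; the routine above is an
  // exact-solution variant of that family. The helper below is purely
  // hypothetical driver code and assumes the plain-solution counterpart
  // output_paraview(std::ofstream&, const unsigned&) and a fully built
  // mesh addressed by mesh_pt.
  //======================================================================
  void example_write_mesh_to_vtu(Mesh* mesh_pt)
  {
    // Number of plot points per coordinate direction
    const unsigned npts = 5;

    // Open the .vtu file and write the unstructured-grid data
    std::ofstream vtu_file("soln0.vtu");
    mesh_pt->output_paraview(vtu_file, npts);
    vtu_file.close();
  }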
2019
2020
2021 //========================================================
2022 /// Output function for the mesh class
2023 ///
2024 /// Loop over all elements and plot (i.e. execute
2025 /// the element's own output() function)
2026 //========================================================
2027 void Mesh::output(std::ostream& outfile)
2028 {
2029 // Loop over the elements and call their output functions
2030 // Assign Element_pt_range
2031 unsigned long Element_pt_range = Element_pt.size();
2032 for (unsigned long e = 0; e < Element_pt_range; e++)
2033 {
2034 // Try to cast to FiniteElement
2035 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2036 if (el_pt == 0)
2037 {
2038 oomph_info << "Can't execute output(...) for non FiniteElements"
2039 << std::endl;
2040 }
2041 else
2042 {
2043#ifdef OOMPH_HAS_MPI
2045#endif
2046 {
2048 }
2049#ifdef OOMPH_HAS_MPI
2050 else
2051 {
2052 if (!el_pt->is_halo())
2053 {
2055 }
2056 }
2057#endif
2058 }
2059 }
2060 }
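
  //========================================================
  // Illustrative usage sketch (not part of the library source): plotting
  // a mesh from driver code with the output(...) functions defined here.
  // mesh_pt is assumed to point to a fully built mesh; the file names are
  // hypothetical.
  //========================================================
  void example_plot_mesh(Mesh* mesh_pt)
  {
    // Default output: each element chooses its own number of plot points
    std::ofstream coarse_file("coarse_soln.dat");
    mesh_pt->output(coarse_file);
    coarse_file.close();

    // Finer output: five plot points in each coordinate direction
    // (uses the overload defined below)
    std::ofstream fine_file("fine_soln.dat");
    mesh_pt->output(fine_file, 5);
    fine_file.close();
  }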
2061
2062 //========================================================
2063 /// Output function for the mesh class
2064 ///
2065 /// Loop over all elements and plot (i.e. execute
2066 /// the element's own output() function). Use
2067 /// n_plot plot points in each coordinate direction.
2068 //========================================================
2069 void Mesh::output(std::ostream& outfile, const unsigned& n_plot)
2070 {
2071 // Loop over the elements and call their output functions
2072 // Assign Element_pt_range
2073 unsigned long Element_pt_range = Element_pt.size();
2074
2075 for (unsigned long e = 0; e < Element_pt_range; e++)
2076 {
2077 // Try to cast to FiniteElement
2078 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2079 if (el_pt == 0)
2080 {
2081 oomph_info << "Can't execute output(...) for non FiniteElements"
2082 << std::endl;
2083 }
2084 else
2085 {
2086#ifdef OOMPH_HAS_MPI
2088#endif
2089 {
2091 }
2092#ifdef OOMPH_HAS_MPI
2093 else
2094 {
2095 if (!el_pt->is_halo())
2096 {
2098 }
2099 }
2100#endif
2101 }
2102 }
2103 }
2104
2105
2106 //========================================================
2107 /// Output function for the mesh class
2108 ///
2109 /// Loop over all elements and plot (i.e. execute
2110 /// the element's own output() function)
2111 /// (C style output)
2112 //========================================================
2114 {
2115 // Loop over the elements and call their output functions
2116 // Assign Element_pt_range
2117 unsigned long Element_pt_range = Element_pt.size();
2118 for (unsigned long e = 0; e < Element_pt_range; e++)
2119 {
2120 // Try to cast to FiniteElement
2121 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2122 if (el_pt == 0)
2123 {
2124 oomph_info << "Can't execute output(...) for non FiniteElements"
2125 << std::endl;
2126 }
2127 else
2128 {
2129#ifdef OOMPH_HAS_MPI
2131#endif
2132 {
2134 }
2135#ifdef OOMPH_HAS_MPI
2136 else
2137 {
2138 if (!el_pt->is_halo())
2139 {
2141 }
2142 }
2143#endif
2144 }
2145 }
2146 }
2147
2148 //========================================================
2149 /// Output function for the mesh class
2150 ///
2151 /// Loop over all elements and plot (i.e. execute
2152 /// the element's own output() function). Use
2153 /// n_plot plot points in each coordinate direction.
2154 /// (C style output)
2155 //========================================================
2156 void Mesh::output(FILE* file_pt, const unsigned& n_plot)
2157 {
2158 // Loop over the elements and call their output functions
2159 // Assign Element_pt_range
2160 unsigned long Element_pt_range = Element_pt.size();
2161 for (unsigned long e = 0; e < Element_pt_range; e++)
2162 {
2163 // Try to cast to FiniteElement
2164 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2165 if (el_pt == 0)
2166 {
2167 oomph_info << "Can't execute output(...) for non FiniteElements"
2168 << std::endl;
2169 }
2170 else
2171 {
2172#ifdef OOMPH_HAS_MPI
2174#endif
2175 {
2177 }
2178#ifdef OOMPH_HAS_MPI
2179 else
2180 {
2181 if (!el_pt->is_halo())
2182 {
2184 }
2185 }
2186#endif
2187 }
2188 }
2189 }
2190
2191
2192 //========================================================
2193 /// Output function for the mesh class
2194 ///
2195 /// Loop over all elements and plot (i.e. execute
2196 /// the element's own output_fct() function). Use
2197 /// n_plot plot points in each coordinate direction.
2198 //========================================================
2199 void Mesh::output_fct(std::ostream& outfile,
2200 const unsigned& n_plot,
2202 {
2203 // Loop over the elements and call their output functions
2204 // Assign Element_pt_range
2205 unsigned long Element_pt_range = Element_pt.size();
2206 for (unsigned long e = 0; e < Element_pt_range; e++)
2207 {
2208 // Try to cast to FiniteElement
2209 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2210 if (el_pt == 0)
2211 {
2212 oomph_info << "Can't execute output_fct(...) for non FiniteElements"
2213 << std::endl;
2214 }
2215 else
2216 {
2217#ifdef OOMPH_HAS_MPI
2219#endif
2220 {
2222 }
2223#ifdef OOMPH_HAS_MPI
2224 else
2225 {
2226 if (!el_pt->is_halo())
2227 {
2229 }
2230 }
2231#endif
2232 }
2233 }
2234 }
2235
2236 //========================================================
2237 /// Output function for the mesh class
2238 ///
2239 /// Loop over all elements and plot (i.e. execute
2240 /// the element's own output_fct() function) at time t. Use
2241 /// n_plot plot points in each coordinate direction.
2242 //========================================================
2243 void Mesh::output_fct(std::ostream& outfile,
2244 const unsigned& n_plot,
2245 const double& time,
2247 {
2248 // Loop over the elements and call their output functions
2249 // Assign Element_pt_range
2250 unsigned long Element_pt_range = Element_pt.size();
2251 for (unsigned long e = 0; e < Element_pt_range; e++)
2252 {
2253 // Try to cast to FiniteElement
2254 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(Element_pt[e]);
2255 if (el_pt == 0)
2256 {
2257 oomph_info << "Can't execute output_fct(...) for non FiniteElements"
2258 << std::endl;
2259 }
2260 else
2261 {
2262#ifdef OOMPH_HAS_MPI
2264#endif
2265 {
2267 }
2268#ifdef OOMPH_HAS_MPI
2269 else
2270 {
2271 if (!el_pt->is_halo())
2272 {
2274 }
2275 }
2276#endif
2277 }
2278 }
2279 }
2280
2281 //==================================================================
2282 /// Assign the initial values for an impulsive start, which is
2283 /// achieved by looping over all data in the mesh (internal element
2284 /// data and data stored at nodes) and calling the
2285 /// assign_initial_values_impulsive() function of each data's
2286 /// timestepper
2287 //=================================================================
2289 {
2290 // Loop over the elements
2291 unsigned long Nelement = nelement();
2292 for (unsigned long e = 0; e < Nelement; e++)
2293 {
2294 // Find the number of internal dofs
2295 unsigned Ninternal = element_pt(e)->ninternal_data();
2296 // Loop over internal dofs and assign the initial values
2297 // using the internal data's timestepper
2298 for (unsigned j = 0; j < Ninternal; j++)
2299 {
2300 element_pt(e)
2302 ->time_stepper_pt()
2303 ->assign_initial_values_impulsive(element_pt(e)->internal_data_pt(j));
2304 }
2305 }
2306
2307 // Loop over the nodes
2308 unsigned long n_node = nnode();
2309 for (unsigned long n = 0; n < n_node; n++)
2310 {
2311 // Assign initial values using the Node's timestepper
2313 Node_pt[n]);
2314 // Assign initial positions using the Node's timestepper
2315 Node_pt[n]
2316 ->position_time_stepper_pt()
2317 ->assign_initial_positions_impulsive(Node_pt[n]);
2318 }
2319 }
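
  //==================================================================
  // Illustrative usage sketch (not part of the library source): a typical
  // impulsive start in driver code. Once the current nodal values and
  // positions represent the initial condition, the single call below makes
  // all history values consistent with an impulsive start from that state.
  //==================================================================
  void example_impulsive_start(Mesh* mesh_pt)
  {
    // (Assume the current values/positions have already been set to the
    // initial condition elsewhere in the driver code.)

    // Set all history values and positions for an impulsive start
    mesh_pt->assign_initial_values_impulsive();
  }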
2320
2321 //===============================================================
2322 /// Shift time-dependent data along for next timestep:
2323 /// Again this is achieved by looping over all data and calling
2324 /// the functions defined in each data object's timestepper.
2325 //==============================================================
2327 {
2328 // Loop over the elements which shift their internal data
2329 // via their own timesteppers
2330 const unsigned long Nelement = nelement();
2331 for (unsigned long e = 0; e < Nelement; e++)
2332 {
2333 // Find the number of internal dofs
2334 const unsigned Ninternal = element_pt(e)->ninternal_data();
2335 // Loop over internal dofs and shift the time values
2336 // using the internal data's timestepper
2337 for (unsigned j = 0; j < Ninternal; j++)
2338 {
2339 element_pt(e)
2341 ->time_stepper_pt()
2342 ->shift_time_values(element_pt(e)->internal_data_pt(j));
2343 }
2344 }
2345
2346 // Loop over the nodes
2347 const unsigned long n_node = nnode();
2348 for (unsigned long n = 0; n < n_node; n++)
2349 {
2350 // Shift the Data associated with the nodes using the Node's own
2351 // timestepper
2353 // Push history of nodal positions back
2354 Node_pt[n]->position_time_stepper_pt()->shift_time_positions(Node_pt[n]);
2355 }
2356 }
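
  //==============================================================
  // Illustrative usage sketch (not part of the library source): driver
  // code rarely calls shift_time_values() directly because
  // Problem::unsteady_newton_solve(...) shifts the history values of all
  // its meshes automatically. If a mesh is managed by hand, the shift must
  // happen before the solve at the new time level, e.g.
  //==============================================================
  void example_prepare_for_next_timestep(Mesh* mesh_pt)
  {
    // Push nodal/internal history values and nodal positions back by one
    // time level; the solve then overwrites the current values
    mesh_pt->shift_time_values();
  }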
2357
2358 //=========================================================================
2359 /// Calculate predictions for all Data and positions associated
2360 /// with the mesh. This is usually only used for adaptive time-stepping
2361 /// when the comparison between a predicted value and the actual value
2362 /// is used to determine the change in step size. Again the
2363 /// loop is over all data in the mesh and individual timestepper functions
2364 /// for each data value are called.
2365 //=========================================================================
2367 {
2368 // Loop over the elements which shift their internal data
2369 // via their own timesteppers
2370 const unsigned long Nelement = nelement();
2371 for (unsigned long e = 0; e < Nelement; e++)
2372 {
2373 // Find the number of internal dofs
2374 const unsigned Ninternal = element_pt(e)->ninternal_data();
2375 // Loop over internal dofs and calculate predicted values
2376 // using the internal data's timestepper
2377 for (unsigned j = 0; j < Ninternal; j++)
2378 {
2379 element_pt(e)
2381 ->time_stepper_pt()
2382 ->calculate_predicted_values(element_pt(e)->internal_data_pt(j));
2383 }
2384 }
2385
2386 // Loop over the nodes
2387 const unsigned long n_node = nnode();
2388 for (unsigned long n = 0; n < n_node; n++)
2389 {
2390 // Calculate the predicted values at the nodes
2392 // Calculate the predicted positions
2393 Node_pt[n]->position_time_stepper_pt()->calculate_predicted_positions(
2394 Node_pt[n]);
2395 }
2396 }
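
  //=========================================================================
  // Illustrative usage sketch (not part of the library source): predictions
  // are normally requested by the adaptive time-steppers inside
  // Problem::adaptive_unsteady_newton_solve(...), which compares them
  // against the computed solution to select the next step size. Explicit
  // mesh-level use is simply
  //=========================================================================
  void example_predict(Mesh* mesh_pt)
  {
    // Ask each data value's (and position's) time stepper to store its
    // predicted value for the current step
    mesh_pt->calculate_predicted_values();
  }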
2397
2398 //===============================================================
2399 /// Virtual function that should be overloaded if the mesh
2400 /// has any mesh level storage of the timestepper
2401 //==================================================================
2403 const bool& preserve_existing_data)
2404 {
2405#ifdef PARANOID
2407 {
2408 std::ostringstream warning_stream;
2410 << "Empty set_mesh_level_time_stepper() has been called.\n"
2411 << "This function needs to be overloaded to reset any (pointers to) \n"
2412 << "timesteppers for meshes that store timesteppers in locations "
2413 "other\n"
2414 << "than the Nodes or Elements;\n"
2415 << "e.g. SpineMeshes have SpineData with timesteppers,\n"
2416 << "Triangle and TetMeshes store the timestepper for use in "
2417 "adaptivity.\n\n\n";
2419 << "If you are solving a continuation or bifurcation detecion\n"
2420 << "problem and strange things are happening, then check that\n"
2421 << "you don't need to overload this function for your mesh."
2422 << "\n This warning can be suppressed by setting:\n"
2423 << "Mesh::Suppress_warning_about_empty_mesh_level_time_stepper_"
2424 "function=true"
2425 << std::endl;
2428 }
2429#endif
2430 }
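
  //==================================================================
  // Illustrative sketch (not part of the library source) of the overload
  // that the warning above asks for: a hypothetical mesh class that keeps
  // its own pointer to a time stepper (My_time_stepper_pt, also
  // hypothetical) must redirect that pointer when the time stepper changes.
  //==================================================================
  class ExampleMeshWithTimeStepper : public Mesh
  {
  public:
    void set_mesh_level_time_stepper(TimeStepper* const& time_stepper_pt,
                                     const bool& preserve_existing_data)
    {
      // Redirect the mesh-level pointer; data stored at nodes and in
      // elements is handled separately by set_nodal_time_stepper() etc.
      My_time_stepper_pt = time_stepper_pt;
    }

  private:
    // Mesh-level storage for the time stepper (hypothetical)
    TimeStepper* My_time_stepper_pt;
  };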
2431
2432 //===============================================================
2433 /// Set the values of auxiliary data used in continuation problems
2434 /// when the data is pinned.
2435 //==================================================================
2438 {
2439 // Loop over the nodes
2440 const unsigned long n_node = this->nnode();
2441 for (unsigned long n = 0; n < n_node; n++)
2442 {
2443 continuation_storage_pt->set_consistent_pinned_values(this->Node_pt[n]);
2444 continuation_storage_pt->set_consistent_pinned_positions(
2445 this->Node_pt[n]);
2446 }
2447
2448 // Loop over the elements
2449 const unsigned long n_element = this->nelement();
2450 for (unsigned long e = 0; e < n_element; e++)
2451 {
2452 // Cache pointer to the element
2453 GeneralisedElement* const elem_pt = this->element_pt(e);
2454 // Find the number of internal dofs
2455 const unsigned n_internal = elem_pt->ninternal_data();
2456
2457 // Loop over internal dofs and test the data
2458 for (unsigned j = 0; j < n_internal; j++)
2459 {
2460 continuation_storage_pt->set_consistent_pinned_values(
2462 }
2463 }
2464 }
2465
2466
2467 //===============================================================
2468 /// Return true if the pointer corresponds to any data stored in
2469 /// the mesh and false if not
2470 //==================================================================
2472 {
2473 // Loop over the nodes
2474 const unsigned long n_node = this->nnode();
2475 for (unsigned long n = 0; n < n_node; n++)
2476 {
2477 // Check the values and positional data associated with each node
2478 if ((this->Node_pt[n]->does_pointer_correspond_to_value(parameter_pt)) ||
2479 (this->Node_pt[n]->does_pointer_correspond_to_position_data(
2480 parameter_pt)))
2481 {
2482 return true;
2483 }
2484 }
2485
2486 // Loop over the elements
2487 const unsigned long n_element = this->nelement();
2488 for (unsigned long e = 0; e < n_element; e++)
2489 {
2490 // Cache pointer to the element
2491 GeneralisedElement* const elem_pt = this->element_pt(e);
2492
2493 // Find the number of internal dofs
2494 const unsigned n_internal = elem_pt->ninternal_data();
2495
2496 // Loop over internal dofs and test the data
2497 for (unsigned j = 0; j < n_internal; j++)
2498 {
2500 parameter_pt))
2501 {
2502 return true;
2503 }
2504 }
2505 }
2506
2507 // If we get here we haven't found the data, so return false
2508 return false;
2509 }
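
  //==================================================================
  // Illustrative usage sketch (not part of the library source): in a
  // continuation problem one may need to know whether the continuation
  // parameter is itself a value stored in the mesh. The parameter pointer
  // is hypothetical driver-code data.
  //==================================================================
  bool example_parameter_lives_in_mesh(Mesh* mesh_pt, double* parameter_pt)
  {
    // True if the parameter is a nodal value, a nodal position or an
    // internal datum of one of the mesh's elements
    return mesh_pt->does_pointer_correspond_to_mesh_data(parameter_pt);
  }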
2510
2511
2512 //===============================================================
2513 /// Set the time stepper associated with all the nodal data
2514 /// in the problem
2515 //==============================================================
2516 void Mesh::set_nodal_time_stepper(TimeStepper* const& time_stepper_pt,
2517 const bool& preserve_existing_data)
2518 {
2519 // Loop over the nodes
2520 const unsigned long n_node = this->nnode();
2521 for (unsigned long n = 0; n < n_node; n++)
2522 {
2523 // Set the timestepper associated with each node
2524 this->Node_pt[n]->set_time_stepper(time_stepper_pt,
2526 this->Node_pt[n]->set_position_time_stepper(time_stepper_pt,
2528 }
2529 }
2530
2531 //===============================================================
2532 /// Set the time stepper associated with all internal data stored
2533 /// in the elements in the mesh
2534 //===============================================================
2536 TimeStepper* const& time_stepper_pt, const bool& preserve_existing_data)
2537 {
2538 // Loop over the elements
2539 const unsigned long n_element = this->nelement();
2540 for (unsigned long e = 0; e < n_element; e++)
2541 {
2542 // Find the number of internal dofs
2543 const unsigned n_internal = this->element_pt(e)->ninternal_data();
2544
2545 // Loop over internal dofs and set the timestepper
2546 for (unsigned j = 0; j < n_internal; j++)
2547 {
2549 time_stepper_pt, preserve_existing_data);
2550 }
2551 }
2552 }
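
  //===============================================================
  // Illustrative usage sketch (not part of the library source): switching
  // the whole mesh to a different time stepper (allocated in driver code)
  // while keeping the existing history values. Access permitting, this is
  // what Problem-level code does when the time stepper is changed.
  //===============================================================
  void example_switch_time_stepper(Mesh* mesh_pt, TimeStepper* new_ts_pt)
  {
    // Keep the values already stored in the history slots
    const bool preserve_existing_data = true;

    // Nodal values and nodal positions...
    mesh_pt->set_nodal_time_stepper(new_ts_pt, preserve_existing_data);

    // ...and any internal data held by the elements
    mesh_pt->set_elemental_internal_time_stepper(new_ts_pt,
                                                 preserve_existing_data);
  }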
2553
2554 //========================================================================
2555 /// A function that upgrades an ordinary node to a boundary node.
2556 /// All pointers to the node from the mesh's elements are found
2557 /// and replaced by pointers to the new boundary node. If the node
2558 /// is present in the mesh's list of nodes, that pointer is also
2559 /// replaced. Finally, the pointer argument node_pt addresses the new
2560 /// node on return from the function.
2561 /// We shouldn't ever really use this, but it does make life that
2562 /// bit easier for the lazy mesh writer.
2563 //=======================================================================
2565 {
2566 // Cache a list of FiniteElement pointers for use in this function.
2568 for (unsigned e = 0, ne = nelement(); e < ne; e++)
2569 {
2570 // Some elements may not have been built yet, just store a null pointer
2571 // for these cases.
2572 if (Element_pt[e] == 0) fe_pt[e] = 0;
2573 else
2575 }
2576
2577 // Now call the real function
2579 }
2580
2581 // ============================================================
2582 /// As convert_to_boundary_node but with a vector of pre-"dynamic cast"ed
2583 /// pointers passed in. If this function is being called often then
2584 /// creating this vector and passing it in explicitly each time can give a
2585 /// large speed up.
2586 // Note: the real reason that this function is so slow in the first place
2587 // is because it has to loop over all elements. So if you use this function
2588 // O(N) times your boundary node creation complexity is O(N^2).
2589 // ============================================================
2591 Node*& node_pt, const Vector<FiniteElement*>& finite_element_pt)
2592 {
2593 // If the node is already a boundary node, then return straight away,
2594 // we don't need to do anything
2595 if (dynamic_cast<BoundaryNodeBase*>(node_pt) != 0)
2596 {
2597 return;
2598 }
2599
2600 // Loop over all the elements in the mesh and find all those in which
2601 // the present node is referenced and the corresponding local node number
2602 // in those elements.
2603
2604 // Storage for elements and local node number
2605 std::list<std::pair<unsigned long, int>>
2607
2608 // Loop over all elements
2609 unsigned long n_element = this->nelement();
2610 for (unsigned long e = 0; e < n_element; e++)
2611 {
2612 // Buffer the case when we have not yet filled up the element array
2613 // Unfortunately, we should not assume that the array has been filled
2614 // in a linear order, so we can't break out early.
2615 if (Element_pt[e] != 0)
2616 {
2617 // Find the local node number of the passed node
2619 // If the node is present in the element, add it to our list and
2620 // NULL out the local element entries
2621 if (node_number != -1)
2622 {
2625 std::make_pair(e, node_number));
2626 // Null it out
2628 }
2629 }
2630 } // End of loop over elements
2631
2632 // If there are no entries in the list we are in real trouble
2634 {
2635 std::ostringstream error_stream;
2636 error_stream << "Node " << node_pt
2637 << " is not contained in any elements in the Mesh."
2638 << std::endl
2639 << "How was it created then?" << std::endl;
2640
2641 throw OomphLibError(
2643 }
2644
2645
2646 // Create temporary storage for a pointer to the old node.
2647 // This is required because if we have passed a reference to the
2648 // first element that we find, constructing the new node
2649 // will over-write our pointer and we'll get segmentation faults.
2651
2652 // We now create the new node by using the first element in the list
2653 std::list<std::pair<unsigned long, int>>::iterator list_it =
2655
2656 // Create a new boundary node, using the timestepper from the
2657 // original node
2658 Node* new_node_pt =
2660 list_it->second, node_pt->time_stepper_pt());
2661
2662 // Now copy all the information across from the old node
2663
2664 // Can we cast the node to a solid node
2665 SolidNode* solid_node_pt = dynamic_cast<SolidNode*>(new_node_pt);
2666 // If it's a solid node, do the casting
2667 if (solid_node_pt != 0)
2668 {
2669 solid_node_pt->copy(dynamic_cast<SolidNode*>(old_node_pt));
2670 }
2671 else
2672 {
2673 new_node_pt->copy(old_node_pt);
2674 }
2675
2676 // Loop over all other elements in the list and set their pointers
2677 // to the new node
2678 for (++list_it; // Increment the iterator
2680 ++list_it)
2681 {
2683 }
2684
2685 // Finally, find the position of the node in the global mesh
2687 std::find(Node_pt.begin(), Node_pt.end(), old_node_pt);
2688
2689 // If it is in the mesh, update the pointer
2690 if (it != Node_pt.end())
2691 {
2692 *it = new_node_pt;
2693 }
2694
2695 // Can now delete the old node
2696 delete old_node_pt;
2697
2698 // Replace the passed pointer by a pointer to the new node
2699 // Note that in most cases, this will be wasted work because the node
2700 // pointer will either be the pointer in the mesh's or an element's
2701 // node_pt vector. Still, assignment is quicker than an if to check this.
2703 }
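
  // ============================================================
  // Illustrative sketch (not part of the library source): inside a mesh
  // constructor that needs to upgrade many nodes, build the vector of
  // pre-cast FiniteElement pointers once and pass it to the two-argument
  // version above, as the comment above suggests, to avoid the O(N^2) cost
  // of calling the single-argument version repeatedly. Nodes_to_upgrade is
  // a hypothetical container assembled by the (derived) mesh itself.
  //
  //   // Pre-cast all elements once
  //   const unsigned n_el = nelement();
  //   Vector<FiniteElement*> fe_pt(n_el, 0);
  //   for (unsigned e = 0; e < n_el; e++)
  //   {
  //     fe_pt[e] = dynamic_cast<FiniteElement*>(element_pt(e));
  //   }
  //
  //   // Upgrade each node, re-using the pre-cast element vector
  //   for (unsigned j = 0; j < Nodes_to_upgrade.size(); j++)
  //   {
  //     convert_to_boundary_node(Nodes_to_upgrade[j], fe_pt);
  //   }
  // ============================================================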
2704
2705#ifdef OOMPH_HAS_MPI
2706
2707 //========================================================================
2708 /// Setup shared node scheme: Shared node lookup scheme contains
2709 /// a unique correspondence between all nodes on the halo/haloed
2710 /// elements between two processors.
2711 //========================================================================
2713 {
2714 // Determine the shared nodes lookup scheme - all nodes located on the
2715 // halo(ed) elements between two domains. This scheme is necessary in order
2716 // to identify master nodes that may not be present in the halo-haloed
2717 // element lookup scheme between two processors (for example, if the node
2718 // is on an element which is in a lookup scheme between two higher-numbered
2719 // processors)
2720
2721 double t_start = 0.0;
2723 {
2725 }
2726
2727 // Store number of processors and current process
2728 int n_proc = Comm_pt->nproc();
2729 int my_rank = Comm_pt->my_rank();
2730
2731 // Need to clear the shared node scheme first
2732 Shared_node_pt.clear();
2733
2734 for (int d = 0; d < n_proc; d++)
2735 {
2736 // map of bools for whether the node has been shared,
2737 // initialised to 0 (false) for each domain d
2738 std::map<Node*, bool> node_shared;
2739
2740 // For all domains lower than the current domain: Do halos first
2741 // then haloed, to ensure correct order in lookup scheme from
2742 // the other side
2743 if (d < my_rank)
2744 {
2745 // Get the nodes from the halo elements first
2747 unsigned nhalo_elem = halo_elem_pt.size();
2748
2749 for (unsigned e = 0; e < nhalo_elem; e++)
2750 {
2751 // Get finite element
2752 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
2753 if (el_pt != 0)
2754 {
2755 // Loop over nodes
2756 unsigned nnod = el_pt->nnode();
2757 for (unsigned j = 0; j < nnod; j++)
2758 {
2759 Node* nod_pt = el_pt->node_pt(j);
2760
2761 // Add it as a shared node from current domain
2762 if (!node_shared[nod_pt])
2763 {
2764 this->add_shared_node_pt(d, nod_pt);
2765 node_shared[nod_pt] = true;
2766 }
2767
2768 } // end loop over nodes
2769 }
2770
2771 } // end loop over elements
2772
2773 // Now get any left over nodes on the haloed elements
2775 unsigned nhaloed_elem = haloed_elem_pt.size();
2776
2777 for (unsigned e = 0; e < nhaloed_elem; e++)
2778 {
2779 // Get element
2781 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
2782 if (el_pt != 0)
2783 {
2784 // Loop over the nodes
2785 unsigned nnod = el_pt->nnode();
2786 for (unsigned j = 0; j < nnod; j++)
2787 {
2788 Node* nod_pt = el_pt->node_pt(j);
2789
2790 // Add it as a shared node from current domain
2791 if (!node_shared[nod_pt])
2792 {
2793 this->add_shared_node_pt(d, nod_pt);
2794 node_shared[nod_pt] = true;
2795 }
2796
2797 } // end loop over nodes
2798 }
2799 } // end loop over elements
2800 }
2801
2802 // If the domain is bigger than the current rank: Do haloed first
2803 // then halo, to ensure correct order in lookup scheme from
2804 // the other side
2805 if (d > my_rank)
2806 {
2807 // Get the nodes from the haloed elements first
2809 unsigned nhaloed_elem = haloed_elem_pt.size();
2810
2811 for (unsigned e = 0; e < nhaloed_elem; e++)
2812 {
2813 // Get element
2815 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
2816 if (el_pt != 0)
2817 {
2818 // Loop over nodes
2819 unsigned nnod = el_pt->nnode();
2820 for (unsigned j = 0; j < nnod; j++)
2821 {
2822 Node* nod_pt = el_pt->node_pt(j);
2823
2824 // Add it as a shared node from current domain
2825 if (!node_shared[nod_pt])
2826 {
2827 this->add_shared_node_pt(d, nod_pt);
2828 node_shared[nod_pt] = true;
2829 }
2830
2831 } // end loop over nodes
2832 }
2833 } // end loop over elements
2834
2835 // Now get the nodes from any halo elements left over
2837 unsigned nhalo_elem = halo_elem_pt.size();
2838
2839 for (unsigned e = 0; e < nhalo_elem; e++)
2840 {
2841 // Get element
2842 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
2843 if (el_pt != 0)
2844 {
2845 // Loop over nodes
2846 unsigned nnod = el_pt->nnode();
2847 for (unsigned j = 0; j < nnod; j++)
2848 {
2849 Node* nod_pt = el_pt->node_pt(j);
2850
2851 // Add it as a shared node from current domain
2852 if (!node_shared[nod_pt])
2853 {
2854 this->add_shared_node_pt(d, nod_pt);
2855 node_shared[nod_pt] = true;
2856 }
2857
2858 } // end loop over nodes
2859 }
2860 } // end loop over elements
2861
2862 } // end if (d ...)
2863
2864 } // end loop over processes
2865
2866
2867 double t_end = 0.0;
2869 {
2871 oomph_info << "Time for identification of shared nodes: "
2872 << t_end - t_start << std::endl;
2873 oomph_info.stream_pt()->flush();
2874 }
2875 }
2876
2877 //========================================================================
2878 /// Synchronise shared node lookup schemes to cater for the
2879 /// case where:
2880 /// (1) a certain node on the current processor is halo with proc p
2881 /// (i.e. its non-halo counterpart lives on processor p)
2882 /// (2) that node also exists (also as a halo) on another processor
2883 /// (q, say) where its non-halo counter part is also known to be
2884 /// on processor p.
2885 /// However, without calling this function the current processor does not
2886 /// necessarily know that it shares a node with processor q. This
2887 /// information can be required, e.g. when synchronising hanging node
2888 /// schemes over all processors.
2889 //========================================================================
2891 {
2892 double t_start = 0.0;
2894 {
2896 }
2897
2898 double tt_start = 0.0;
2899 double tt_end = 0.0;
2901 {
2903 }
2904
2905 // Storage for current processor and number of processors
2906 int n_proc = Comm_pt->nproc();
2907 int my_rank = Comm_pt->my_rank();
2908
2909
2910#ifdef PARANOID
2911 // Has some twit filled in shared nodes with own process?!
2912 // Check at start of function
2913 if (Shared_node_pt[my_rank].size() != 0)
2914 {
2915 throw OomphLibError(
2916 "Processor has shared nodes with itself! Something's gone wrong!",
2919 }
2920#endif
2921
2922
2923 // Stage 1: Populate the set of processor IDs that
2924 // each haloed node on current processor is haloed by.
2925 std::map<Node*, std::set<int>> shared_domain_set;
2926
2927 // Associate unique number with any haloed nodes on this processor
2928 std::map<Node*, unsigned> global_haloed_node_number_plus_one;
2929 unsigned global_haloed_count = 0;
2930
2931 // Loop over domains
2932 for (int d = 0; d < n_proc; d++)
2933 {
2934 // Don't talk to yourself
2935 if (d != my_rank)
2936 {
2937 // Loop over haloed nodes
2938 unsigned nnod_haloed = this->nhaloed_node(d);
2939 for (unsigned j = 0; j < nnod_haloed; j++)
2940 {
2941 Node* nod_pt = this->haloed_node_pt(d, j);
2942 shared_domain_set[nod_pt].insert(d);
2944 {
2948 }
2949 }
2950 }
2951 }
2952
2954 {
2956 oomph_info << "Time for initial classification in "
2957 "Mesh::synchronise_shared_nodes(): "
2958 << tt_end - tt_start << std::endl;
2960 }
2961
2962
2963 // Stage 2: All-to-all communication to inform all processors that
2964 // hold halo nodes with current processor about all domains that the current
2965 // processor shares nodes with [This will allow the other processors to add
2966 // these nodes to their shared node lookup schemes with processors
2967 // that they currently "can't see"].
2968
2969 // Data to be sent to each processor
2971
2972 // Storage for all values to be sent to all processors
2974
2975 // Start location within send_data for data to be sent to each processor
2977
2978
2979 // Loop over haloed nodes with other domains
2980 for (int domain = 0; domain < n_proc; domain++)
2981 {
2982 // Set the offset for the current processor
2984
2985 // Every processor works on haloed nodes with proc "domain" and
2986 // sends associations across to that domain. No need to talk to
2987 // yourself...
2988 if (domain != my_rank)
2989 {
2990 // Send total number of global haloed nodes
2991 // send_data.push_back(global_haloed_count);
2992
2993 // Loop over haloed nodes
2994 unsigned nnod_haloed = this->nhaloed_node(domain);
2995 for (unsigned j = 0; j < nnod_haloed; j++)
2996 {
2997 Node* nod_pt = this->haloed_node_pt(domain, j);
2998
2999 // Send global ID of haloed node
3001
3002 // Get the set of domains that halo this node
3004
3005 // Insert number of shared domains into send data
3007 send_data.push_back(n_shared_domains);
3008
3009 // Add shared domains
3010 for (std::set<int>::iterator it = tmp_shared_domain_set.begin();
3011 it != tmp_shared_domain_set.end();
3012 it++)
3013 {
3014 send_data.push_back(*it);
3015 }
3016 }
3017 }
3018
3019 // Find the number of data added to the vector
3021 }
3022
3023
3024 // Storage for the number of data to be received from each processor
3026
3027 // Now send numbers of data to be sent between all processors
3029 &send_n[0], 1, MPI_INT, &receive_n[0], 1, MPI_INT, Comm_pt->mpi_comm());
3030
3031
3032 // We now prepare the data to be received
3033 // by working out the displacements from the received data
3035 int receive_data_count = 0;
3036 for (int rank = 0; rank < n_proc; ++rank)
3037 {
3038 // Displacement is number of data received so far
3041 }
3042
3043 // Now resize the receive buffer for all data from all processors
3044 // Make sure that it has a size of at least one
3045 if (receive_data_count == 0)
3046 {
3048 }
3050
3051 // Make sure that the send buffer has size at least one
3052 // so that we don't get a segmentation fault
3053 if (send_data.size() == 0)
3054 {
3055 send_data.resize(1);
3056 }
3057
3058 // Now send the data between all the processors
3060 &send_n[0],
3063 &receive_data[0],
3064 &receive_n[0],
3067 Comm_pt->mpi_comm());
3068
3069
3071 {
3073 oomph_info << "Time for alltoall in Mesh::synchronise_shared_nodes(): "
3074 << tt_end - tt_start << std::endl;
3076 }
3077
3078
3080 {
3081 oomph_info << "Starting vector to set conversion in "
3082 << "Mesh::synchronise_shared_nodes() for a total of "
3083 << nshared_node() << " nodes\n";
3085 }
3086
3087
3088 // Copy vector-based representation of shared nodes into
3089 // sets for faster search
3091 for (int d = 0; d < n_proc; d++)
3092 {
3093 unsigned n_vector = Shared_node_pt[d].size();
3094 for (unsigned i = 0; i < n_vector; i++)
3095 {
3096 shared_node_set[d].insert(Shared_node_pt[d][i]);
3097 }
3098 }
3099
3100
3102 {
3105 << "Time for vector to set in Mesh::synchronise_shared_nodes(): "
3106 << tt_end - tt_start << std::endl;
3108 }
3109
3110
3111 // Now use the received data
3112 for (int send_rank = 0; send_rank < n_proc; send_rank++)
3113 {
3114 // Don't bother to do anything for the processor corresponding to the
3115 // current processor or if no data were received from this processor
3116 if ((send_rank != my_rank) && (receive_n[send_rank] != 0))
3117 {
3118 // Counter for the data within the large array
3120
3121 // Read total number of global haloed nodes
3122 // unsigned n_global_haloed_nodes_on_send_proc=receive_data[count++];
3123
3124 // Storage for nodes and associated domains:
3125 // domains_map[global_haloed_node_number].first = node
3126 // domains_map[global_haloed_node_number].second = set of domains
3127 // that this node is associated with.
3129 std::map<unsigned, std::pair<Node*, std::set<unsigned>>> domains_map;
3130
3131 // Loop over halo nodes with sending processor
3132 unsigned nnod_halo = this->nhalo_node(send_rank);
3133 for (unsigned j = 0; j < nnod_halo; j++)
3134 {
3135 Node* nod_pt = this->halo_node_pt(send_rank, j);
3136
3137 // Read unique ID of haloed node on send proc
3139
3140 // Read out number of shared domains into send data
3141 unsigned n_shared_domains = receive_data[count++];
3142
3143 // Prepare set of domains
3144 std::set<unsigned> domain_set;
3145
3146 // Read 'em
3147 for (unsigned i = 0; i < n_shared_domains; i++)
3148 {
3150
3151 // Record in set
3152 domain_set.insert(shared_domain);
3153 }
3154
3155 // Add entry:
3157 std::make_pair(nod_pt, domain_set);
3158
3159 } // end of loop over halo nodes
3160
3161
3162 // Now add new shared nodes in order
3163#ifdef PARANOID
3164 int previous_one = -1;
3165#endif
3166 for (std::map<unsigned, std::pair<Node*, std::set<unsigned>>>::iterator
3167 it = domains_map.begin();
3168 it != domains_map.end();
3169 it++)
3170 {
3171 // Super-paranoid: Check that the map actually sorted entries
3172 // by key (as it should)
3173#ifdef PARANOID
3174 if (int((*it).first) < previous_one)
3175 {
3176 std::ostringstream error_stream;
3177 error_stream << "Map did not store entries in order of key\n "
3178 << "Current key: " << (*it).first
3179 << "; previous one: " << previous_one << "\n"
3180 << "Need to rewrite this...\n";
3181 throw OomphLibError(error_stream.str(),
3184 }
3185 previous_one = (*it).first;
3186#endif
3187
3188 // Extract node
3189 Node* nod_pt = (*it).second.first;
3190
3191 // Extract set of domains
3192 std::set<unsigned> domain_set((*it).second.second);
3193
3194 // Read 'em
3195 for (std::set<unsigned>::iterator itt = domain_set.begin();
3196 itt != domain_set.end();
3197 itt++)
3198 {
3199 int shared_domain = (*itt);
3200
3201 // No need to add shared nodes with oneself!
3202 if (shared_domain != my_rank)
3203 {
3204 // Is node already listed in shared node scheme? Find it
3205 // and get iterator to entry
3206 std::set<Node*>::iterator ittt =
3208
3209 // If it's not in there already iterator points to end of
3210 // set
3212 {
3213 // Now add it
3215
3216 // Update set
3218 }
3219 }
3220 }
3221 }
3222
3223 } // end of any data is received and ignore own domain
3224
3225 } // end of loop over send ranks
3226
3227
3228#ifdef PARANOID
3229 // Has some twit filled in shared nodes with own process?!
3230 // Check at end of function.
3231 if (Shared_node_pt[my_rank].size() != 0)
3232 {
3233 throw OomphLibError(
3234 "Processor has shared nodes with itself! Something's gone wrong!",
3237 }
3238#endif
3239
3240
3242 {
3245 << "Time for final processing in Mesh::synchronise_shared_nodes(): "
3246 << tt_end - tt_start << std::endl;
3248 }
3249
3251 {
3252 double t_end = TimingHelpers::timer();
3253 oomph_info << "Total time for Mesh::synchronise_shared_nodes(): "
3254 << t_end - t_start << std::endl;
3255 }
3256 }
3257
3258
3259 //========================================================================
3260 /// Classify all halo and haloed information in the mesh
3261 //========================================================================
3263 const bool& report_stats)
3264 {
3265 // MemoryUsage::doc_memory_usage(
3266 // "at beginning of Mesh::classify_halo_and_haloed_nodes");
3267
3268 double t_start = 0.0;
3270 {
3272 }
3273
3274 double tt_start = 0.0;
3276 {
3278 }
3279
3280 // Set up shared nodes scheme
3282
3283 // MemoryUsage::doc_memory_usage("after setup shared node scheme");
3284
3285 double tt_end = 0.0;
3287 {
3289 oomph_info << "Time for Mesh::setup_shared_node_scheme() "
3290 << " Mesh::classify_halo_and_haloed_nodes(): "
3291 << tt_end - tt_start << std::endl;
3292 oomph_info.stream_pt()->flush();
3294 }
3295
3296
3297 // Wipe existing storage schemes for halo(ed) nodes
3298 Halo_node_pt.clear();
3299 Haloed_node_pt.clear();
3300
3301 // Storage for number of processors and current processor
3302 int n_proc = Comm_pt->nproc();
3303 int my_rank = Comm_pt->my_rank();
3305
3306 // Determine which processors the nodes are associated with
3307 // and hence who's in charge
3308 std::map<Data*, std::set<unsigned>> processors_associated_with_data;
3309 std::map<Data*, unsigned> processor_in_charge;
3310
3311 // Loop over all processors and associate any nodes on the halo
3312 // elements involved with that processor
3313 for (int domain = 0; domain < n_proc; domain++)
3314 {
3315 // Get vector of halo elements by copy operation
3317
3318 // Loop over halo elements associated with this adjacent domain
3319 unsigned nelem = halo_elem_pt.size();
3320 for (unsigned e = 0; e < nelem; e++)
3321 {
3322 // Get element; only finite elements have nodes
3324 dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
3325 if (finite_el_pt != 0)
3326 {
3327 // Loop over nodes
3328 unsigned nnod = finite_el_pt->nnode();
3329 for (unsigned j = 0; j < nnod; j++)
3330 {
3332 // Associate node with this domain
3334
3335 // Do the same if the node is solid
3336 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3337 if (solid_nod_pt != 0)
3338 {
3340 ->variable_position_pt()]
3341 .insert(domain);
3342 }
3343 }
3344 }
3345 }
3346 }
3347
3348
3349 // Loop over all [non-halo] elements and associate their nodes
3350 // with current processor
3351 unsigned nelem = this->nelement();
3352 for (unsigned e = 0; e < nelem; e++)
3353 {
3355 dynamic_cast<FiniteElement*>(this->element_pt(e));
3356
3357 // Only visit non-halos and finite elements
3358 if ((finite_el_pt != 0) && (!finite_el_pt->is_halo()))
3359 {
3360 // Loop over nodes
3361 unsigned nnod = finite_el_pt->nnode();
3362 for (unsigned j = 0; j < nnod; j++)
3363 {
3365
3366 // Associate this node with current processor
3368
3369 // do the same if we have a SolidNode
3370 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3371 if (solid_nod_pt != 0)
3372 {
3374 ->variable_position_pt()]
3375 .insert(my_rank);
3376 }
3377 }
3378 }
3379 }
3380
3381
3383 {
3386 << "Time for setup loops in Mesh::classify_halo_and_haloed_nodes: "
3387 << tt_end - tt_start << std::endl;
3388 oomph_info.stream_pt()->flush();
3390 }
3391
3392 // MemoryUsage::doc_memory_usage("after setup loops");
3393
3394 // At this point we need to "synchronise" the nodes on halo(ed) elements
3395 // so that the processors_associated_with_data agrees for the same node
3396 // on all processors. Strategy: All local nodes have just had their
3397 // association recorded. Now loop over all haloed elements
3398 // and send the association of their nodes to the corresponding
3399 // halo processors where they update/augment the association of the
3400 // nodes of the corresponding halo elements.
3401
3402 // Loop over all domains
3403 for (int d = 0; d < n_proc; d++)
3404 {
3405 // Prepare vector to send/receive
3407
3408 if (d != my_rank)
3409 {
3410 // Communicate the processors associated with data on haloed elements
3411
3412 // Get haloed elements
3414
3415 // Initialise counter for this haloed layer
3416 unsigned count_data = 0;
3417
3418 // Loop over haloed elements
3419 unsigned n_haloed_elem = haloed_elem_pt.size();
3420 for (unsigned e = 0; e < n_haloed_elem; e++)
3421 {
3422 // Only nodes in finite elements
3424 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
3425 if (haloed_el_pt != 0)
3426 {
3427 // Loop over nodes
3428 unsigned n_node = haloed_el_pt->nnode();
3429 for (unsigned j = 0; j < n_node; j++)
3430 {
3432
3433 // Number of processors associated with this node
3435
3436 // This number needs to be sent
3438 count_data++;
3439
3440 // Now add the process IDs associated to the vector to be sent
3441 std::set<unsigned> procs_set =
3443 for (std::set<unsigned>::iterator it = procs_set.begin();
3444 it != procs_set.end();
3445 it++)
3446 {
3448 count_data++;
3449 }
3450 }
3451 }
3452 }
3453
3454
3455 // Send the information
3456 MPI_Send(&count_data, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
3457 if (count_data != 0)
3458 {
3460 count_data,
3462 d,
3463 1,
3464 Comm_pt->mpi_comm());
3465 }
3466 }
3467 else
3468 {
3469 // Receive the processors associated with data onto halo elements
3470 for (int dd = 0; dd < n_proc; dd++)
3471 {
3472 if (dd != my_rank) // (my_rank=d)
3473 {
3474 // We will be looping over the halo elements with process dd
3476 unsigned n_halo_elem = halo_elem_pt.size();
3477 unsigned count_data = 0;
3479 1,
3481 dd,
3482 0,
3483 Comm_pt->mpi_comm(),
3484 &status);
3485 if (count_data != 0)
3486 {
3489 count_data,
3491 dd,
3492 1,
3493 Comm_pt->mpi_comm(),
3494 &status);
3495
3496 // Reset counter and loop through nodes on halo elements
3497 count_data = 0;
3498 for (unsigned e = 0; e < n_halo_elem; e++)
3499 {
3501 dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
3502 if (halo_el_pt != 0)
3503 {
3504 unsigned n_node = halo_el_pt->nnode();
3505 for (unsigned j = 0; j < n_node; j++)
3506 {
3508
3509 // Get number of processors associated with data that was
3510 // sent
3511 unsigned n_assoc =
3513 count_data++;
3514
3515 for (unsigned i_assoc = 0; i_assoc < n_assoc; i_assoc++)
3516 {
3517 // Get the process ID
3518 unsigned sent_domain =
3520 [count_data];
3521 count_data++;
3522
3523 // Add it to this processor's list of IDs
3525 sent_domain);
3526
3527 // If the node is solid then add the ID to the solid data
3529 dynamic_cast<SolidNode*>(nod_pt);
3530 if (solid_nod_pt != 0)
3531 {
3533 [solid_nod_pt->variable_position_pt()]
3534 .insert(sent_domain);
3535 }
3536 }
3537 }
3538 }
3539 }
3540 }
3541 }
3542 }
3543 }
3544 }
3545
3547 {
3550 << "Time for pt2pt send/recv in Mesh::classify_halo_and_haloed_nodes: "
3551 << tt_end - tt_start << std::endl;
3552 oomph_info.stream_pt()->flush();
3554 }
3555
3556
3557 // MemoryUsage::doc_memory_usage("after pt2pt send/recv");
3558
3559 // Loop over all nodes on the present processor and put the highest-numbered
3560 // processor associated with each node "in charge" of the node
3561 unsigned nnod = this->nnode();
3562 for (unsigned j = 0; j < nnod; j++)
3563 {
3564 Node* nod_pt = this->node_pt(j);
3565
3566 // Reset halo status of node to false
3568
3569 // If it's a SolidNode then the halo status of the data
3570 // associated with that must also be reset to false
3571 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3572 if (solid_nod_pt != 0)
3573 {
3574 solid_nod_pt->variable_position_pt()->set_nonhalo();
3575 }
3576
3577 // Now put the highest-numbered one in charge
3578 unsigned proc_max = 0;
3579 std::set<unsigned> procs_set = processors_associated_with_data[nod_pt];
3580 for (std::set<unsigned>::iterator it = procs_set.begin();
3581 it != procs_set.end();
3582 it++)
3583 {
3584 if (*it > proc_max) proc_max = *it;
3585 }
3587
3588 // Do the same if we have a SolidNode
3589 if (solid_nod_pt != 0)
3590 {
3591 // Now put the highest-numbered one in charge
3592 unsigned proc_max_solid = 0;
3593 std::set<unsigned> procs_set_solid =
3594 processors_associated_with_data[solid_nod_pt->variable_position_pt()];
3595 for (std::set<unsigned>::iterator it = procs_set_solid.begin();
3596 it != procs_set_solid.end();
3597 it++)
3598 {
3600 }
3601 processor_in_charge[solid_nod_pt->variable_position_pt()] =
3603 }
3604 }
3605
3606
3607 // First stab at determining halo nodes. They are located on the halo
3608 // elements and the processor in charge differs from the
3609 // current processor
3610
3611 // Only count nodes once (map is initialised to 0 = false)
3612 std::map<Node*, bool> done;
3613
3614 // Loop over all processors
3615 for (int domain = 0; domain < n_proc; domain++)
3616 {
3617 // Get vector of halo elements by copy operation
3619
3620 // Loop over halo elements associated with this adjacent domain
3621 unsigned nelem = halo_elem_pt.size();
3622
3623 for (unsigned e = 0; e < nelem; e++)
3624 {
3625 // Get element
3627
3628 // Can it be cast to a finite element
3629 FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
3630 if (finite_el_pt != 0)
3631 {
3632 // Loop over nodes
3633 unsigned nnod = finite_el_pt->nnode();
3634 for (unsigned j = 0; j < nnod; j++)
3635 {
3637
3638 // Have we done this node already?
3639 if (!done[nod_pt])
3640 {
3641 // Is the other processor/domain in charge of this node?
3643
3644 if (proc_in_charge != my_rank)
3645 {
3646 // To keep the order of the nodes consistent with that
3647 // in the haloed node lookup scheme, only
3648 // allow it to be added when the current domain is in charge
3649 if (proc_in_charge == int(domain))
3650 {
3651 // Add it as being halo node whose non-halo counterpart
3652 // is located on processor proc_in_charge
3653 this->add_halo_node_pt(proc_in_charge, nod_pt);
3654
3655 // The node itself needs to know it is a halo
3657
3658 // If it's a SolidNode then the data associated with that
3659 // must also be halo
3660 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3661 if (solid_nod_pt != 0)
3662 {
3663 solid_nod_pt->variable_position_pt()->set_halo(
3665 }
3666
3667 // We're done with this node
3668 done[nod_pt] = true;
3669 }
3670 }
3671 }
3672 }
3673
3674 } // End of finite element case
3675
3676 // Now make sure internal data on halo elements is also halo
3677 unsigned nintern_data = el_pt->ninternal_data();
3678 for (unsigned iintern = 0; iintern < nintern_data; iintern++)
3679 {
3681 }
3682 }
3683 }
3684
3685
3686 // First stab at determining haloed nodes. They are located on the haloed
3687 // elements and the processor in charge is the current processor
3688
3689 // Loop over processors that share haloes with the current one
3690 for (int domain = 0; domain < n_proc; domain++)
3691 {
3692 // Only count nodes once (map is initialised to 0 = false)
3693 std::map<Node*, bool> node_done;
3694
3695 // Get vector of haloed elements by copy operation
3697 this->haloed_element_pt(domain));
3698
3699 // Loop over haloed elements associated with this adjacent domain
3700 unsigned nelem = haloed_elem_pt.size();
3701
3702 for (unsigned e = 0; e < nelem; e++)
3703 {
3704 // Get element
3706
3707 // Can it be cast to a finite element
3708 FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
3709 if (finite_el_pt != 0)
3710 {
3711 // Loop over nodes
3712 unsigned nnod = finite_el_pt->nnode();
3713 for (unsigned j = 0; j < nnod; j++)
3714 {
3716
3717 // Have we done this node already?
3718 if (!node_done[nod_pt])
3719 {
3720 // Is the current processor/domain in charge of this node?
3722
3723 if (proc_in_charge == my_rank)
3724 {
3725 // Add it as being haloed from specified domain
3727
3728 // We're done with this node
3729 node_done[nod_pt] = true;
3730 }
3731 }
3732 }
3733 }
3734 }
3735 }
3736
3737
3739 {
3742 << "Time for first classific in Mesh::classify_halo_and_haloed_nodes: "
3743 << tt_end - tt_start << std::endl;
3744 oomph_info.stream_pt()->flush();
3746 }
3747
3748 // MemoryUsage::doc_memory_usage("after first classific");
3749
3750
3751 // Find any overlooked halo nodes: These are any nodes on the halo/haloed
3752 // elements (i.e. precisely the nodes currently contained in the shared
3753 // node scheme) that have not been classified as haloes (yet) though they
3754 // should have been because another processor is in charge of them.
3755 // This arises when the "overlooked halo node" is not part of the
3756 // halo/haloed element lookup scheme between the current processor
3757 // and the processor that holds the non-halo counterpart. E.g. we're
3758 // on proc 3. A node at the very edge of its halo layer also exists
3759 // on procs 0 and 1 with 1 being "in charge". However, the node in
3760 // question is not part of the halo/haloed element lookup scheme between
3761 // processor 1 and 3 so in the classification performed above, we never
3762 // visit it so it's overlooked. The code below rectifies this by going
3763 // through the intermediate processor (here proc 0) that contains the node
3764 // in lookup schemes with the halo processor (here proc 3, this one) and the
3765 // one that contains the non-halo counterpart (here proc 1).
3766
3767
3768 // Counter for number of overlooked halos (if there aren't any we don't
3769 // need any comms below)
3770 unsigned n_overlooked_halo = 0;
3771
3772 // Record previously overlooked halo nodes so they can be
3773 // added to the shared node lookup scheme (in a consistent order) below
3775
3776 // Record previously overlooked haloed nodes so they can be
3777 // added to the shared node lookup scheme (in a consistent order) below
3779
3780 // Data to be sent to each processor
3782
3783 // Storage for all values to be sent to all processors
3785
3786 // Start location within send_data for data to be sent to each processor
3788
3789 // Check missing ones
3790 for (int domain = 0; domain < n_proc; domain++)
3791 {
3792 // Set the offset for the current processor
3794
3795 // Don't bother to do anything if the processor in the loop is the
3796 // current processor
3797 if (domain != my_rank)
3798 {
3799 unsigned nnod = nshared_node(domain);
3800 for (unsigned j = 0; j < nnod; j++)
3801 {
3803
3804 // Is a different-numbered processor/domain in charge of this node?
3806 if ((proc_in_charge != my_rank) && !(nod_pt->is_halo()))
3807 {
3808 // Add it as being halo node whose non-halo counterpart
3809 // is located on processor proc_in_charge
3810 this->add_halo_node_pt(proc_in_charge, nod_pt);
3811
3812 // We have another one...
3815
3816 // The node itself needs to know it is a halo
3818
3819 // If it's a SolidNode then the data associated with that
3820 // must also be halo
3821 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
3822 if (solid_nod_pt != 0)
3823 {
3824 solid_nod_pt->variable_position_pt()->set_halo(proc_in_charge);
3825 }
3826
3827 // Send shared node ID and processor in charge info to
3828 // "intermediate" processor (i.e. the processor that has
3829 // the lookup schemes to talk to both)
3830 send_data.push_back(j);
3831 send_data.push_back(proc_in_charge);
3832 }
3833 }
3834 }
3835
3836 // End of data
3837 send_data.push_back(-1);
3838
3839 // Find the number of data added to the vector
3841 }
3842
3843
3844 // Check if any processor has stumbled across overlooked halos
3845 // (if not we can omit the comms below)
3846 unsigned global_max_n_overlooked_halo = 0;
3849 1,
3851 MPI_MAX,
3852 Comm_pt->mpi_comm());
3853
3854
3855 oomph_info << "Global max number of overlooked haloes: "
3856 << global_max_n_overlooked_halo << std::endl;
3857
3859 {
3861 oomph_info << "Time for setup 1st alltoalls in "
3862 "Mesh::classify_halo_and_haloed_nodes: "
3863 << tt_end - tt_start << std::endl;
3864 oomph_info.stream_pt()->flush();
3866 }
3867
3868 // MemoryUsage::doc_memory_usage("after setup 1st alltoalls");
3869
3870 // Any comms needed?
3872 {
3873 // Storage for the number of data to be received from each processor
3875
3876 // Now send numbers of data to be sent between all processors
3878 &send_n[0], 1, MPI_INT, &receive_n[0], 1, MPI_INT, Comm_pt->mpi_comm());
3879
3880
3882 {
3885 << "Time for 1st alltoall in Mesh::classify_halo_and_haloed_nodes: "
3886 << tt_end - tt_start << std::endl;
3887 oomph_info.stream_pt()->flush();
3889 }
3890
3891 // MemoryUsage::doc_memory_usage("after 1st alltoall");
3892
3893
3894 // We now prepare the data to be received
3895 // by working out the displacements from the received data
3897 int receive_data_count = 0;
3898 for (int rank = 0; rank < n_proc; ++rank)
3899 {
3900 // Displacement is number of data received so far
3903 }
3904
3905 // Now resize the receive buffer for all data from all processors
3906 // Make sure that it has a size of at least one
3907 if (receive_data_count == 0)
3908 {
3910 }
3912
3913 // Make sure that the send buffer has size at least one
3914 // so that we don't get a segmentation fault
3915 if (send_data.size() == 0)
3916 {
3917 send_data.resize(1);
3918 }
3919
3920 // Now send the data between all the processors
3922 &send_n[0],
3924 MPI_INT,
3925 &receive_data[0],
3926 &receive_n[0],
3928 MPI_INT,
3929 Comm_pt->mpi_comm());
3930
3931
3933 {
3936 << "Time for 2nd alltoall in Mesh::classify_halo_and_haloed_nodes: "
3937 << tt_end - tt_start << std::endl;
3938 oomph_info.stream_pt()->flush();
3940 }
3941
3942 // MemoryUsage::doc_memory_usage("after 2nd alltoall");
3943
3944 // Provide storage for data to be sent to processor that used to be
3945 // in charge
3947
3948 // Now use the received data
3949 for (int send_rank = 0; send_rank < n_proc; send_rank++)
3950 {
3951 // Don't bother to do anything for the processor corresponding to the
3952 // current processor or if no data were received from this processor
3953 if ((send_rank != my_rank) && (receive_n[send_rank] != 0))
3954 {
3955 // Counter for the data within the large array
3957
3958 // Unpack until we reach "end of data" indicator (-1)
3959 while (true)
3960 {
3961 // Read next entry
3962 int next_one = receive_data[count++];
3963
3964 if (next_one == -1)
3965 {
3966 break;
3967 }
3968 else
3969 {
3970 // Shared halo node number in lookup scheme between intermediate
3971 // (i.e. this) processor and the one that has the overlooked halo
3972 unsigned j = unsigned(next_one);
3973
3974 // Processor in charge:
3976
3977 // Find actual node from shared node lookup scheme
3979
3980
3981 // Note: This search seems relatively cheap
3982 // and in the tests done, did not benefit
3983 // from conversion to map-based search
3984 // as in
3985 // TreeBasedRefineableMeshBase::synchronise_hanging_nodes()
3986
3987
3988 // Locate its index in lookup scheme with proc in charge
3989 bool found = false;
3990 unsigned nnod = nshared_node(proc_in_charge);
3991 for (unsigned jj = 0; jj < nnod; jj++)
3992 {
3994 {
3995 found = true;
3996
3997 // Shared node ID in lookup scheme with intermediate (i.e.
3998 // this) processor
4000
4001 // Processor that holds the overlooked halo node
4003 send_rank);
4004
4005 break;
4006 }
4007 }
4008 if (!found)
4009 {
4010 std::ostringstream error_stream;
4012 << "Failed to find node that is shared node " << j
4013 << " (with processor " << send_rank
4014 << ") \n in shared node lookup scheme with processor "
4015 << proc_in_charge << " which is in charge.\n";
4016 throw OomphLibError(error_stream.str(),
4019 }
4020 }
4021 }
4022 }
4023 } // End of data is received
4024
4025
4027 {
4029 oomph_info << "Time for 1st setup 3rd alltoall in "
4030 "Mesh::classify_halo_and_haloed_nodes: "
4031 << tt_end - tt_start << std::endl;
4032 oomph_info.stream_pt()->flush();
4034 }
4035
4036 // MemoryUsage::doc_memory_usage("after 1st setup for 3rd alltoall");
4037
4038 // Data to be sent to each processor
4040
4041 // Storage for all values to be sent to all processors
4043
4044 // Start location within send_data for data to be sent to each processor
4046
4047 // Collate data
4048 for (int domain = 0; domain < n_proc; domain++)
4049 {
4050 // Set the offset for the current processor
4052
4053 // Don't bother to do anything if the processor in the loop is the
4054 // current processor
4055 if (domain != my_rank)
4056 {
4058 for (unsigned j = 0; j < n; j++)
4059 {
4061 }
4062 }
4063
4064 // End of data
4065 all_send_data.push_back(-1);
4066
4067 // Find the number of data added to the vector
4070 }
4071
4072
4074 {
4076 oomph_info << "Time for 2nd setup 3rd alltoall in "
4077 "Mesh::classify_halo_and_haloed_nodes: "
4078 << tt_end - tt_start << std::endl;
4079 oomph_info.stream_pt()->flush();
4081 }
4082
4083 // MemoryUsage::doc_memory_usage("after 2nd setup 3rd alltoall");
4084
4085 // Storage for the number of data to be received from each processor
4087
4088 // Now send numbers of data to be sent between all processors
4090 1,
4091 MPI_INT,
4092 &all_receive_n[0],
4093 1,
4094 MPI_INT,
4095 Comm_pt->mpi_comm());
4096
4097
4099 {
4102 << "Time for 3rd alltoall in Mesh::classify_halo_and_haloed_nodes: "
4103 << tt_end - tt_start << std::endl;
4104 oomph_info.stream_pt()->flush();
4106 }
4107
4108 // MemoryUsage::doc_memory_usage("after 3rd alltoall");
4109
4110 // We now prepare the data to be received
4111 // by working out the displacements from the received data
4113 int all_receive_data_count = 0;
4114
4115 for (int rank = 0; rank < n_proc; ++rank)
4116 {
4117 // Displacement is number of data received so far
4120 }
4121
4122 // Now resize the receive buffer for all data from all processors
4123 // Make sure that it has a size of at least one
4124 if (all_receive_data_count == 0)
4125 {
4127 }
4129
4130 // Make sure that the send buffer has size at least one
4131 // so that we don't get a segmentation fault
4132 if (all_send_data.size() == 0)
4133 {
4134 all_send_data.resize(1);
4135 }
4136
4137 // Now send the data between all the processors
4139 &all_send_n[0],
4141 MPI_INT,
4142 &all_receive_data[0],
4143 &all_receive_n[0],
4145 MPI_INT,
4146 Comm_pt->mpi_comm());
4147
4148
4150 {
4153 << "Time for 4th alltoall in Mesh::classify_halo_and_haloed_nodes: "
4154 << tt_end - tt_start << std::endl;
4155 oomph_info.stream_pt()->flush();
4157 }
4158
4159 // MemoryUsage::doc_memory_usage("after 4th alltoall");
4160
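    // -----------------------------------------------------------------------
    // Illustrative sketch (not part of the library; buffer names are generic):
    // the two collectives above follow the usual "counts first, payload
    // second" pattern. One MPI_Alltoall with a single int per processor
    // exchanges the send counts; those counts (and the displacements
    // prefix-summed from them) then parameterise the MPI_Alltoallv that moves
    // the variable-length payload:
    //
    //   // each rank tells every other rank how many ints it will send
    //   MPI_Alltoall(&send_n[0], 1, MPI_INT, &recv_n[0], 1, MPI_INT, comm);
    //
    //   // move the payload using the counts and accumulated displacements
    //   MPI_Alltoallv(&send_data[0], &send_n[0], &send_displ[0], MPI_INT,
    //                 &recv_data[0], &recv_n[0], &recv_displ[0], MPI_INT,
    //                 comm);
    // -----------------------------------------------------------------------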
4161
4162 // Now use the received data
4163 for (int send_rank = 0; send_rank < n_proc; send_rank++)
4164 {
4165 // Don't bother to do anything for the processor corresponding to the
4166 // current processor or if no data were received from this processor
4167 if ((send_rank != my_rank) && (all_receive_n[send_rank] != 0))
4168 {
4169 // Counter for the data within the large array
4171
4172 // Unpack until we reach "end of data" indicator (-1)
4173 while (true)
4174 {
4175 // Read next entry
4177
4178 if (next_one == -1)
4179 {
4180 break;
4181 }
4182 else
4183 {
4184 // Shared node ID in lookup scheme with intermediate (sending)
4185 // processor
4186 unsigned j = unsigned(next_one);
4187
4188 // Get pointer to previously overlooked halo
4190
4191 // Proc where overlooked halo is
4193
4194 // Add it as being haloed from specified domain
4195 this->add_haloed_node_pt(proc_with_overlooked_halo, nod_pt);
4196
4197                // Record new haloed node so it can be added to the shared
4198                // node lookup scheme (in a consistent order) below.
4200 nod_pt);
4201 }
4202 }
4203 }
4204 } // End of data is received
4205
4207 {
4209 oomph_info << "Time for postproc 4th alltoall in "
4210 "Mesh::classify_halo_and_haloed_nodes: "
4211 << tt_end - tt_start << std::endl;
4212 oomph_info.stream_pt()->flush();
4214 }
4215
4216 // MemoryUsage::doc_memory_usage("after postprocess 4th alltoall");
4217
4218 // Now add previously overlooked halo/haloed nodes to shared node
4219 // lookup scheme in consistent order
4220 for (int d = 0; d < n_proc; d++)
4221 {
4222 // For all domains lower than the current domain: Do halos first
4223 // then haloed, to ensure correct order in lookup scheme from
4224 // the other side
4225 if (d < my_rank)
4226 {
4227 unsigned nnod = over_looked_halo_node_pt[d].size();
4228 for (unsigned j = 0; j < nnod; j++)
4229 {
4231 }
4233 for (unsigned j = 0; j < nnod; j++)
4234 {
4236 }
4237 }
4238 else if (d > my_rank)
4239 {
4240 unsigned nnod = over_looked_haloed_node_pt[d].size();
4241 for (unsigned j = 0; j < nnod; j++)
4242 {
4244 }
4246 for (unsigned j = 0; j < nnod; j++)
4247 {
4249 }
4250 }
4251 }
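    // -------------------------------------------------------------------
    // Illustrative sketch (not part of the library): the ordering
    // convention in the loop above is what keeps the shared-node lookup
    // schemes on the two processors consistent. For a neighbouring domain
    // of lower rank we add the new halo nodes before the new haloed ones,
    // whereas for a higher rank we add haloed before halo. For two
    // processors p0 < p1 this gives, schematically,
    //
    //   on p1 (d = p0 < my_rank):  add shared(halo with p0),  then shared(haloed by p0)
    //   on p0 (d = p1 > my_rank):  add shared(haloed by p1),  then shared(halo with p1)
    //
    // so the j-th shared node with the neighbour refers to the same
    // physical node on both sides.
    // -------------------------------------------------------------------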
4252
4253
4254 // Doc stats
4255 if (report_stats)
4256 {
4257 // Report total number of halo(ed) and shared nodes for this process
4258 oomph_info << "BEFORE SYNCHRONISE SHARED NODES Processor " << my_rank
4259 << " holds " << this->nnode() << " nodes of which "
4260 << this->nhalo_node() << " are halo nodes \n while "
4261 << this->nhaloed_node() << " are haloed nodes, and "
4262 << this->nshared_node() << " are shared nodes." << std::endl;
4263
4264 // Report number of halo(ed) and shared nodes with each domain
4265 // from the current process
4266 for (int iproc = 0; iproc < n_proc; iproc++)
4267 {
4268 // Get vector of halo elements by copy operation
4270 this->halo_element_pt(iproc));
4272 this->haloed_element_pt(iproc));
4273 oomph_info << "With process " << iproc << ", there are "
4274 << this->nhalo_node(iproc) << " halo nodes, and "
4275 << std::endl
4276 << this->nhaloed_node(iproc) << " haloed nodes, and "
4277 << this->nshared_node(iproc) << " shared nodes"
4278 << std::endl
4279 << halo_elem_pt.size() << " halo elements and "
4280 << haloed_elem_pt.size() << " haloed elements\n";
4281 }
4282 }
4283
4315
4316 } // end if comms reqd because we encountered overlooked halo elements
4317
4318
4319 // MemoryUsage::doc_memory_usage("before sync halo nodes");
4320
4321 // Synchronise shared nodes
4323
4324 // MemoryUsage::doc_memory_usage("after sync halo nodes");
4325
4326#ifdef PARANOID
4327 // Has some twit filled in haloed nodes with own process?!
4328 if (Haloed_node_pt[my_rank].size() != 0)
4329 {
4330 throw OomphLibError(
4331 "Processor has haloed nodes with itself! Something's gone wrong!",
4334 }
4335 // Has some twit filled in halo nodes with own process?!
4336 if (Halo_node_pt[my_rank].size() != 0)
4337 {
4338 throw OomphLibError(
4339 "Processor has halo nodes with itself! Something's gone wrong!",
4342 }
4343 // Has some twit filled in root haloed elements with own process?!
4344 if (Root_haloed_element_pt[my_rank].size() != 0)
4345 {
4346 throw OomphLibError("Processor has root haloed elements with itself! "
4347 "Something's gone wrong!",
4350 }
4351 // Has some twit filled in root halo elements with own process?!
4352 if (Root_halo_element_pt[my_rank].size() != 0)
4353 {
4354 throw OomphLibError(
4355 "Processor has root halo elements with itself! Something's gone wrong!",
4358 }
4359#endif
4360
4361 // Doc stats
4362 if (report_stats)
4363 {
4364 // Report total number of halo(ed) and shared nodes for this process
4365 oomph_info << "Processor " << my_rank << " holds " << this->nnode()
4366 << " nodes of which " << this->nhalo_node()
4367 << " are halo nodes \n while " << this->nhaloed_node()
4368 << " are haloed nodes, and " << this->nshared_node()
4369 << " are shared nodes." << std::endl;
4370
4371 // Report number of halo(ed) and shared nodes with each domain
4372 // from the current process
4373 for (int iproc = 0; iproc < n_proc; iproc++)
4374 {
4375 // Get vector of halo elements by copy operation
4378 this->haloed_element_pt(iproc));
4379 oomph_info << "With process " << iproc << ", there are "
4380 << this->nhalo_node(iproc) << " halo nodes, and "
4381 << std::endl
4382 << this->nhaloed_node(iproc) << " haloed nodes, and "
4383 << this->nshared_node(iproc) << " shared nodes" << std::endl
4384 << halo_elem_pt.size() << " halo elements and "
4385 << haloed_elem_pt.size() << " haloed elements\n";
4386 }
4387 }
4388
4389
4390 // MemoryUsage::doc_memory_usage("before resize halo nodes");
4391
4392    // Now resize halo nodes if required. (This can be over-ruled from the
4393    // outside, either by the user (for a (risky!) efficiency saving) or by the
4394    // overloaded version of classify_... in the refineable version of that
4395    // function, where resize_halo_nodes() is called after synchronising hanging nodes.)
4397 {
4399 }
4400
4401 // MemoryUsage::doc_memory_usage("after resize halo nodes");
4402
4404 {
4405 double t_end = TimingHelpers::timer();
4406      oomph_info << "Total time for Mesh::classify_halo_and_haloed_nodes(): "
4407 << t_end - t_start << std::endl;
4408 oomph_info.stream_pt()->flush();
4409 }
4410
4411 // MemoryUsage::doc_memory_usage(
4412 // "at end of Mesh::classify_halo_and_halo_nodes()");
4413 }
4414
4415
4416 //========================================================================
4417  /// Helper function that resizes halo nodes to the same
4418  /// size as their non-halo counterparts if required. (A discrepancy
4419  /// can arise if a FaceElement that introduces additional unknowns
4420  /// is attached to a bulk element that shares a node with a haloed element.
4421  /// In that case the node shared by the haloed and non-haloed elements
4422  /// is resized on that processor but not on the one that holds the
4423  /// halo counterpart, because no FaceElement is attached to the halo
4424  /// element.)
4425 //=========================================================================
4427 {
4428 double t_start = 0.0;
4430 {
4432 }
4433
4435
4436 // Nuffink needs to be done if mesh isn't distributed
4437 if (is_mesh_distributed())
4438 {
4439 // Storage for current processor and number of processors
4440 int n_proc = Comm_pt->nproc();
4441 int my_rank = Comm_pt->my_rank();
4442
4443      // Loop over domains on which non-halo counterparts of my halo nodes live
4444 for (int d = 0; d < n_proc; d++)
4445 {
4446 // On current processor: Receive data for my halo nodes with proc d.
4447 // Elsewhere: Send haloed data with proc d.
4448 if (d == my_rank)
4449 {
4450 // Loop over domains that hold non-halo counterparts of my halo nodes
4451 for (int dd = 0; dd < n_proc; dd++)
4452 {
4453 // Don't talk to yourself
4454 if (dd != d)
4455 {
4456 // How many of my nodes are halo nodes whose non-halo
4457 // counterpart is located on processor dd?
4458 int nnod_halo = this->nhalo_node(dd);
4460 if ((nnod_halo + nnod_ext_halo) != 0)
4461 {
4462 // Receive from processor dd number of haloed nodes (entry 0),
4463 // external haloed nodes (entry 1) and total number of
4464 // unsigneds to be sent below (entry 2)
4465 Vector<int> tmp(3);
4466 MPI_Recv(
4467 &tmp[0], 3, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
4468
4469#ifdef PARANOID
4470 // Check that number of halo/haloed nodes match
4471 int nnod_haloed = tmp[0];
4472 if (nnod_haloed != nnod_halo)
4473 {
4474 std::ostringstream error_message;
4475 error_message << "Clash in numbers of halo and haloed nodes "
4476 << std::endl;
4477 error_message << " between procs " << dd << " and " << d
4478 << ": " << nnod_haloed << " " << nnod_halo
4479 << std::endl;
4480 throw OomphLibError(error_message.str(),
4483 }
4484
4485 // Check that number of external halo/haloed nodes match
4486 int nnod_ext_haloed = tmp[1];
4488 {
4489 std::ostringstream error_message;
4490 error_message
4491 << "Clash in numbers of external halo and haloed nodes "
4492 << std::endl;
4493 error_message << " between procs " << dd << " and " << d
4494 << ": " << nnod_ext_haloed << " "
4495 << nnod_ext_halo << std::endl;
4496 throw OomphLibError(error_message.str(),
4499 }
4500#endif
4501
4502 // How many unsigneds are we about to receive
4503 unsigned n_rec = tmp[2];
4504
4505 // Get strung-together data from other proc
4508 n_rec,
4510 dd,
4511 0,
4512 Comm_pt->mpi_comm(),
4513 &status);
4514
4515 // Step through the flat-packed unsigneds
4516 unsigned count = 0;
4517
4518 // Normal and external halo nodes
4519 for (unsigned loop = 0; loop < 2; loop++)
4520 {
4521 unsigned hi_nod = nnod_halo;
4522 if (loop == 1)
4523 {
4525 }
4526 for (int j = 0; j < int(hi_nod); j++)
4527 {
4528 // Which node are we dealing with
4529 Node* nod_pt = 0;
4530 if (loop == 0)
4531 {
4532 nod_pt = this->halo_node_pt(dd, j);
4533 }
4534 else
4535 {
4536 nod_pt = this->external_halo_node_pt(dd, j);
4537 }
4538
4539 // How many values do we have locally?
4540 unsigned nval_local = nod_pt->nvalue();
4541
4542 // Read number of values on other side
4543 unsigned nval_other = unsigned_rec_data[count++];
4544
4545 if (nval_local != nval_other)
4546 {
4547 nod_pt->resize(nval_other);
4548 }
4549
4550 // Read number of entries in resize map
4551 unsigned nentry = unsigned_rec_data[count++];
4552 if (nentry != 0)
4553 {
4554 // Is current node a boundary node?
4556 dynamic_cast<BoundaryNodeBase*>(nod_pt);
4557#ifdef PARANOID
4558 if (bnod_pt == 0)
4559 {
4560 throw OomphLibError(
4561 "Failed to cast node to boundary node even though "
4562 "we've received data for boundary node",
4565 }
4566#endif
4567
4568 // Create storage for map if it doesn't already exist
4569 bool already_existed = true;
4570 if (
4571 bnod_pt
4572 ->index_of_first_value_assigned_by_face_element_pt() ==
4573 0)
4574 {
4575 bnod_pt
4576 ->index_of_first_value_assigned_by_face_element_pt() =
4577 new std::map<unsigned, unsigned>;
4578 already_existed = false;
4579 }
4580
4581 // Get pointer to the map of indices associated with
4582 // additional values created by face elements
4583 std::map<unsigned, unsigned>* map_pt =
4584 bnod_pt
4585 ->index_of_first_value_assigned_by_face_element_pt();
4586
4587 // Loop over number of entries in map (as received)
4588 for (unsigned i = 0; i < nentry; i++)
4589 {
4590 // Read out pairs...
4593
4594 // If it exists check that values are consistent:
4595 if (already_existed)
4596 {
4597#ifdef PARANOID
4599 {
4600 std::ostringstream error_message;
4601 error_message << "Existing map entry for map entry "
4602 << i << " for node located at ";
4603 unsigned n = nod_pt->ndim();
4604 for (unsigned ii = 0; ii < n; ii++)
4605 {
4606 error_message << nod_pt->position(ii) << " ";
4607 }
4608 error_message
4609 << "Key: " << first_received << " "
4610 << "Local value: " << (*map_pt)[first_received]
4611 << " "
4612 << "Received value: " << second_received
4613 << std::endl;
4614 throw OomphLibError(error_message.str(),
4617 }
4618#endif
4619 }
4620 // Else assign
4621 else
4622 {
4623 (*map_pt)[first_received] = second_received;
4624 }
4625 }
4626 }
4627 }
4628 }
4629 }
4630 }
4631 }
4632 }
4633 // Send my haloed nodes whose halo counterparts are located on processor
4634 // d
4635 else
4636 {
4637 // Storage for number of haloed nodes (entry 0), number of external
4638 // haloed nodes (entry 1) and total number of unsigneds to be sent
4639 // below (entry 2)
4640 Vector<int> tmp(3);
4641 int nnod_haloed = this->nhaloed_node(d);
4642 tmp[0] = nnod_haloed;
4644 tmp[1] = nnod_ext_haloed;
4645 if ((nnod_haloed + nnod_ext_haloed) != 0)
4646 {
4647 // Now string together the data
4649
4650 // Normal and external haloed nodes
4651 for (unsigned loop = 0; loop < 2; loop++)
4652 {
4653 unsigned hi_nod = nnod_haloed;
4654 if (loop == 1)
4655 {
4657 }
4658 for (int j = 0; j < int(hi_nod); j++)
4659 {
4660 // Which node are we dealing with?
4661 Node* nod_pt = 0;
4662 if (loop == 0)
4663 {
4664 nod_pt = this->haloed_node_pt(d, j);
4665 }
4666 else
4667 {
4668 nod_pt = this->external_haloed_node_pt(d, j);
4669 }
4670
4671 // Add number of values of node
4672 unsigned_send_data.push_back(nod_pt->nvalue());
4673
4674 // Get pointer to the map of indices associated with
4675 // additional values created by face elements
4676
4677 // Is it a boundary node?
4679 dynamic_cast<BoundaryNodeBase*>(nod_pt);
4680 if (bnod_pt == 0)
4681 {
4682 // Not a boundary node -- there are zero map entries to follow
4683 unsigned_send_data.push_back(0);
4684 }
4685 else
4686 {
4687 // It's a boundary node: Check if it's been resized
4688 std::map<unsigned, unsigned>* map_pt =
4689 bnod_pt->index_of_first_value_assigned_by_face_element_pt();
4690
4691 // No additional values created -- there are zero map entries
4692 // to follow
4693 if (map_pt == 0)
4694 {
4695 unsigned_send_data.push_back(0);
4696 }
4697 // Created additional values
4698 else
4699 {
4700 // How many map entries were there
4701 unsigned_send_data.push_back(map_pt->size());
4702
4703 // Loop over entries in map and add to send data
4704 for (std::map<unsigned, unsigned>::iterator p =
4705 map_pt->begin();
4706 p != map_pt->end();
4707 p++)
4708 {
4709 unsigned_send_data.push_back((*p).first);
4710 unsigned_send_data.push_back((*p).second);
4711 }
4712 }
4713 }
4714 }
4715 }
4716
4717 // How many values are there in total?
4719 tmp[2] = n_send;
4720
4721 // Send the counts across to the other processor
4722 MPI_Send(&tmp[0], 3, MPI_INT, d, 0, Comm_pt->mpi_comm());
4723
4724 // Send it across to the processor
4726 n_send,
4728 d,
4729 0,
4730 Comm_pt->mpi_comm());
4731 }
4732 }
4733 }
4734 }
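    // ---------------------------------------------------------------------
    // Illustrative sketch (not part of the library; values purely
    // illustrative): the haloed side above packs, for each (external)
    // haloed node, its number of values, then the number of entries in the
    // boundary node's face-element index map, then the (key, value) pairs
    // of that map; the halo side unpacks in exactly the same order. For a
    // single node the flat-packed layout is therefore
    //
    //   {  nvalue,          // e.g. 6
    //      n_map_entries,   // e.g. 2  (0 for non-boundary nodes)
    //      key_0, first_value_index_0,
    //      key_1, first_value_index_1  }
    // ---------------------------------------------------------------------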
4735
4737 {
4738 double t_end = TimingHelpers::timer();
4739 oomph_info << "Total time for Mesh::resize_halo_nodes(): "
4740 << t_end - t_start << std::endl;
4741 }
4742 }
4743
4744
4745 //========================================================================
4746 /// Get all the halo data stored in the mesh and add pointers to
4747 /// the data to the map, indexed by global equation number
4748 //=========================================================================
4749 void Mesh::get_all_halo_data(std::map<unsigned, double*>& map_of_halo_data)
4750 {
4751 // Loop over the map of Halo_nodes
4752 for (std::map<unsigned, Vector<Node*>>::iterator it = Halo_node_pt.begin();
4753 it != Halo_node_pt.end();
4754 ++it)
4755 {
4756 // Find the number of nodes
4757 unsigned n_node = (it->second).size();
4758 // Loop over them all
4759 for (unsigned n = 0; n < n_node; n++)
4760 {
4761 // Add the Node's values (including any solid values) to
4762 // the map
4763 (it->second)[n]->add_value_pt_to_map(map_of_halo_data);
4764 }
4765 } // End of loop over halo nodes
4766
4767 // Now loop over all the halo elements and add their internal data
4768    // Loop over the map of root halo elements
4769 for (std::map<unsigned, Vector<GeneralisedElement*>>::iterator it =
4770 Root_halo_element_pt.begin();
4771 it != Root_halo_element_pt.end();
4772 ++it)
4773 {
4774 // Find the number of root elements
4775 unsigned n_element = (it->second).size();
4776 for (unsigned e = 0; e < n_element; e++)
4777 {
4778 GeneralisedElement* el_pt = (it->second)[e];
4779
4780 // Is it a refineable element?
4782 if (ref_el_pt != 0)
4783 {
4784 // Vector of pointers to leaves in tree emanating from
4785 // current root halo element
4787 ref_el_pt->tree_pt()->stick_leaves_into_vector(leaf_pt);
4788
4789 // Loop over leaves and add their objects (the finite elements)
4790 // to vector
4791 unsigned nleaf = leaf_pt.size();
4792 for (unsigned l = 0; l < nleaf; l++)
4793 {
4794 leaf_pt[l]->object_pt()->add_internal_value_pt_to_map(
4796 }
4797 }
4798 else
4799 {
4801 }
4802 }
4803 }
4804
4805    // Repeat for the external halo data
4806 for (std::map<unsigned, Vector<Node*>>::iterator it =
4807 External_halo_node_pt.begin();
4808 it != External_halo_node_pt.end();
4809 ++it)
4810 {
4811 // Find the number of nodes
4812 unsigned n_node = (it->second).size();
4813 // Loop over them all
4814 for (unsigned n = 0; n < n_node; n++)
4815 {
4816 // Add the Node's values (including any solid values) to
4817 // the map
4818 (it->second)[n]->add_value_pt_to_map(map_of_halo_data);
4819 }
4820 } // End of loop over halo nodes
4821
4822    // Now loop over all the external halo elements and add their internal data
4823    // Loop over the map of external halo elements
4824 for (std::map<unsigned, Vector<GeneralisedElement*>>::iterator it =
4827 ++it)
4828 {
4829 // Find the number of root elements
4830 unsigned n_element = (it->second).size();
4831 for (unsigned e = 0; e < n_element; e++)
4832 {
4833 (it->second)[e]->add_internal_value_pt_to_map(map_of_halo_data);
4834 }
4835 }
4836 }
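  // -------------------------------------------------------------------------
  // Illustrative usage sketch (not part of the library; mesh_pt and
  // global_eqn are hypothetical): the map filled by get_all_halo_data() lets
  // callers look up the storage of a halo value by its global equation
  // number, e.g.
  //
  //   std::map<unsigned, double*> halo_data;
  //   mesh_pt->get_all_halo_data(halo_data);
  //   std::map<unsigned, double*>::iterator it = halo_data.find(global_eqn);
  //   if (it != halo_data.end())
  //   {
  //     double* value_pt = it->second; // pointer to the halo copy of that dof
  //   }
  // -------------------------------------------------------------------------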
4837
4838
4839 //========================================================================
4840 /// Get halo node stats for this distributed mesh:
4841 /// Average/max/min number of halo nodes over all processors.
4842 /// \b Careful: Involves MPI Broadcasts and must therefore
4843 /// be called on all processors!
4844 //========================================================================
4846 unsigned& max_number,
4847 unsigned& min_number)
4848 {
4849 // Storage for number of processors and current processor
4850 int n_proc = Comm_pt->nproc();
4851 int my_rank = Comm_pt->my_rank();
4852
4853 // Create vector to hold number of halo nodes
4855
4856 // Stick own number of halo nodes into appropriate entry
4858
4859    // Gather information on the root processor: The first argument group
4860    // specifies what is to be sent (one int from each processor, indicating
4861    // the number of halo nodes on it), the second group indicates where
4862    // the results are to be gathered (in rank order) on the root processor.
4864 1,
4865 MPI_INT,
4866 &nhalo_nodes[0],
4867 1,
4868 MPI_INT,
4869 0,
4870 Comm_pt->mpi_comm());
4871
4872 // Initialise stats
4873 av_number = 0.0;
4874 int max = -1;
4875 int min = 1000000000;
4876
4877 if (my_rank == 0)
4878 {
4879 for (int i = 0; i < n_proc; i++)
4880 {
4882 if (int(nhalo_nodes[i]) > max) max = nhalo_nodes[i];
4883 if (int(nhalo_nodes[i]) < min) min = nhalo_nodes[i];
4884 }
4886 }
4887
4888 // Now broadcast the result back out
4889 MPI_Bcast(&max, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4890 MPI_Bcast(&min, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4891 MPI_Bcast(&av_number, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
4892
4893 max_number = max;
4894 min_number = min;
4895 }
4896
4897
4898 //========================================================================
4899 /// Get haloed node stats for this distributed mesh:
4900 /// Average/max/min number of haloed nodes over all processors.
4901 /// \b Careful: Involves MPI Broadcasts and must therefore
4902 /// be called on all processors!
4903 //========================================================================
4905 unsigned& max_number,
4906 unsigned& min_number)
4907 {
4908 // Storage for number of processors and current processor
4909 int n_proc = Comm_pt->nproc();
4910 int my_rank = Comm_pt->my_rank();
4911
4912 // Create vector to hold number of haloed nodes
4914
4915 // Stick own number of haloed nodes into appropriate entry
4917
4918    // Gather information on the root processor: The first argument group
4919    // specifies what is to be sent (one int from each processor, indicating
4920    // the number of haloed nodes on it), the second group indicates where
4921    // the results are to be gathered (in rank order) on the root processor.
4923 1,
4924 MPI_INT,
4925 &nhaloed_nodes[0],
4926 1,
4927 MPI_INT,
4928 0,
4929 Comm_pt->mpi_comm());
4930
4931 // Initialise stats
4932 av_number = 0.0;
4933 int max = -1;
4934 int min = 1000000000;
4935
4936 if (my_rank == 0)
4937 {
4938 for (int i = 0; i < n_proc; i++)
4939 {
4941 if (int(nhaloed_nodes[i]) > max) max = nhaloed_nodes[i];
4942 if (int(nhaloed_nodes[i]) < min) min = nhaloed_nodes[i];
4943 }
4945 }
4946
4947 // Now broadcast the result back out
4948 MPI_Bcast(&max, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4949 MPI_Bcast(&min, 1, MPI_INT, 0, Comm_pt->mpi_comm());
4950 MPI_Bcast(&av_number, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
4951
4952 max_number = max;
4953 min_number = min;
4954 }
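  // -------------------------------------------------------------------------
  // Illustrative sketch (not part of the library; n and all_n are generic
  // names): both of the node-number statistics helpers above use the same
  // gather-to-root-then-broadcast pattern. Every rank contributes one int via
  // MPI_Gather, rank 0 reduces the gathered values to average/max/min, and
  // the three results are broadcast back so that all ranks return identical
  // statistics:
  //
  //   MPI_Gather(&n, 1, MPI_INT, &all_n[0], 1, MPI_INT, 0, comm);
  //   // ...rank 0 computes av/max/min from all_n...
  //   MPI_Bcast(&max, 1, MPI_INT, 0, comm);
  //   MPI_Bcast(&min, 1, MPI_INT, 0, comm);
  //   MPI_Bcast(&av, 1, MPI_DOUBLE, 0, comm);
  // -------------------------------------------------------------------------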
4955
4956 //========================================================================
4957 /// Distribute the mesh. Add to vector of deleted elements.
4958 //========================================================================
4962 DocInfo& doc_info,
4963 const bool& report_stats,
4965 {
4966 // Store communicator
4967 Comm_pt = comm_pt;
4968
4969 // Storage for number of processors and current processor
4970 int n_proc = comm_pt->nproc();
4971 int my_rank = comm_pt->my_rank();
4972
4973 // Storage for number of elements and number of nodes on this mesh
4974 unsigned nelem = this->nelement();
4975 unsigned nnod = this->nnode();
4976
4977 std::ostringstream filename;
4978
4979 // Doc the partitioning (only on processor 0)
4980 //-------------------------------------------
4981 if (doc_info.is_doc_enabled())
4982 {
4983 if (my_rank == 0)
4984 {
4985 // Open files for doc of element partitioning
4987 for (int d = 0; d < n_proc; d++)
4988 {
4989 // Note: doc_info.number() was set in Problem::distribute(...) to
4990 // reflect the submesh number
4991 // Clear the filename
4992 filename.str("");
4993 filename << doc_info.directory() << "/domain" << d << "-"
4994 << doc_info.number() << ".dat";
4995 domain_file[d] = new std::ofstream(filename.str().c_str());
4996 }
4997
4998 // Doc
4999 for (unsigned e = 0; e < nelem; e++)
5000 {
5001 // If we can't cast to a finite element, we can't output because
5002 // there is no output function
5004 dynamic_cast<FiniteElement*>(this->element_pt(e));
5005 if (f_el_pt != 0)
5006 {
5008 }
5009 }
5010
5011 for (int d = 0; d < n_proc; d++)
5012 {
5013 domain_file[d]->close();
5014 delete domain_file[d];
5015 domain_file[d] = 0;
5016 }
5017 }
5018 }
5019
5020 // Loop over all elements, associate all
5021 //--------------------------------------
5022 // nodes with the highest-numbered processor and record all
5023 //---------------------------------------------------------
5024 // processors the node is associated with
5025 //---------------------------------------
5026
5027 // Storage for processors in charge and processors associated with data
5028 std::map<Data*, std::set<unsigned>> processors_associated_with_data;
5029 std::map<Data*, unsigned> processor_in_charge;
5030
5031 // For all nodes set the processor in charge to zero
5032 for (unsigned j = 0; j < nnod; j++)
5033 {
5034 Node* nod_pt = this->node_pt(j);
5036 }
5037
5038 // Loop over elements
5039 for (unsigned e = 0; e < nelem; e++)
5040 {
5041 // Get an element and its domain
5042 FiniteElement* el_pt = dynamic_cast<FiniteElement*>(this->element_pt(e));
5043 // Element will only have nodes if it is a finite element
5044 if (el_pt != 0)
5045 {
5046 unsigned el_domain = element_domain[e];
5047
5048 // Associate nodes with highest numbered processor
5049 unsigned nnod = el_pt->nnode();
5050 for (unsigned j = 0; j < nnod; j++)
5051 {
5052 Node* nod_pt = el_pt->node_pt(j);
5053
5054 // processor in charge was initialised to 0 above
5056 {
5058 }
5060 }
5061 }
5062 }
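    // -----------------------------------------------------------------------
    // Illustrative sketch (not part of the library): the double loop above
    // applies a simple deterministic tie-break. A node's "processor in
    // charge" is the highest element domain among all elements that contain
    // it, while every domain that touches the node is recorded as being
    // associated with it. For a node shared by elements assigned to domains
    // {1, 3}, for example,
    //
    //   processor_in_charge[nod_pt]             == 3
    //   processors_associated_with_data[nod_pt] == {1, 3}
    // -----------------------------------------------------------------------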
5063
5064 // Doc the partitioning (only on processor 0)
5065 //-------------------------------------------
5066 if (doc_info.is_doc_enabled())
5067 {
5068 if (my_rank == 0)
5069 {
5070 // Open files for doc of node partitioning
5072 for (int d = 0; d < n_proc; d++)
5073 {
5074 // Note: doc_info.number() was set in Problem::distribute(...) to
5075 // reflect the submesh number...
5076 // Clear the filename
5077 filename.str("");
5078 filename << doc_info.directory() << "/node" << d << "-"
5079 << doc_info.number() << ".dat";
5080 node_file[d] = new std::ofstream(filename.str().c_str());
5081 }
5082
5083 // Doc
5084 for (unsigned j = 0; j < nnod; j++)
5085 {
5086 Node* nod_pt = this->node_pt(j);
5087 const unsigned n_dim = nod_pt->ndim();
5088 for (unsigned i = 0; i < n_dim; i++)
5089 {
5090 *node_file[processor_in_charge[nod_pt]] << nod_pt->x(i) << " ";
5091 }
5093 }
5094 for (int d = 0; d < n_proc; d++)
5095 {
5096 node_file[d]->close();
5097 delete node_file[d];
5098 node_file[d] = 0;
5099 }
5100 }
5101 }
5102
5103 // Declare all nodes as obsolete. We'll
5104 // change this setting for all nodes that must be retained
5105 // further down
5106 for (unsigned j = 0; j < nnod; j++)
5107 {
5108 this->node_pt(j)->set_obsolete();
5109 }
5110
5111
5112 // Backup old mesh data and flush mesh
5113 //-------------------------------------
5114
5115 // Backup pointers to elements in this mesh
5117 for (unsigned e = 0; e < nelem; e++)
5118 {
5119 backed_up_el_pt[e] = this->element_pt(e);
5120 }
5121
5122    // Info. used to re-generate the boundary element scheme after the
5123    // deletion of elements not belonging to the current processor
5124
5125 // Get the number of boundary elements before the deletion of non
5126 // retained elements
5127 const unsigned tmp_nboundary = this->nboundary();
5129 // In case that there are regions, also get the number of boundary
5130 // elements in each region
5132 // Also get the finite element version of the elements and back them
5133 // up
5135
5136 // Only do this if the mesh is a TriangleMeshBase
5137 TriangleMeshBase* triangle_mesh_pt = dynamic_cast<TriangleMeshBase*>(this);
5138 bool is_a_triangle_mesh_base_mesh = false;
5139 if (triangle_mesh_pt != 0)
5140 {
5141 // Set the flag to indicate we are working with a TriangleMeshBase
5142 // mesh
5144
5145 // Are there regions?
5146 const unsigned n_regions = triangle_mesh_pt->nregion();
5147
5148 // Loop over the boundaries
5149 for (unsigned ib = 0; ib < tmp_nboundary; ib++)
5150 {
5151 // Get the number of boundary elements
5153
5154 // Resize the container
5156
5157 // Loop over the regions
5158 for (unsigned rr = 0; rr < n_regions; rr++)
5159 {
5160 // Get the region id
5161 const unsigned region_id =
5162 static_cast<unsigned>(triangle_mesh_pt->region_attribute(rr));
5163
5164          // Store the number of elements in the region (notice we are
5165 // using the region index not the region id to refer to the
5166 // region)
5168 triangle_mesh_pt->nboundary_element_in_region(ib, region_id);
5169
5170 } // for (rr < n_regions)
5171
5172 } // for (ib < tmp_nboundary)
5173
5174 for (unsigned e = 0; e < nelem; e++)
5175 {
5176 // Get the finite element version of the elements and back them
5177 // up
5179 }
5180
5181 } // if (triangle_mesh_pt!=0)
5182
5183 // Flush the mesh storage
5184 this->flush_element_storage();
5185
5186 // Delete any storage of external elements and nodes
5188
5189 // Boolean to indicate which element is to be retained
5190 std::vector<bool> element_retained(nelem, false);
5191
5192 // Storage for element numbers of root halo elements that will be
5193 // retained on current processor: root_halo_element[p][j]
5194 // stores the element number (in the order in which the elements are stored
5195 // in backed_up_el_pt) of the j-th root halo element with processor
5196 // p.
5198
5199 // Dummy entry to make sure we always have something to send
5200 for (int p = 0; p < n_proc; p++)
5201 {
5202 root_halo_element[p].push_back(-1);
5203 }
5204
5205 // Determine which elements are going to end up on which processor
5206 //----------------------------------------------------------------
5207 unsigned number_of_retained_elements = 0;
5208
5209 // Loop over all backed up elements
5211 for (unsigned e = 0; e < nelem; e++)
5212 {
5213 // Get element and its domain
5215 unsigned el_domain = element_domain[e];
5216
5217 // If element is located on current processor add it back to the mesh
5218 if (el_domain == unsigned(my_rank))
5219 {
5220 // Add element to current processor
5221 element_retained[e] = true;
5223 }
5224 // Otherwise we may still need it if it's a halo element:
5225 else
5226 {
5227 // If this current mesh has been told to keep all elements as halos,
5228 // OR the element itself knows that it must be kept then
5229 // keep it
5230 if ((this->Keep_all_elements_as_halos) ||
5231 (el_pt->must_be_kept_as_halo()))
5232 {
5234 {
5235 // Add as root halo element whose non-halo counterpart is
5236 // located on processor el_domain
5237 if (!element_retained[e])
5238 {
5239 root_halo_element[el_domain].push_back(e);
5240 element_retained[e] = true;
5242 }
5243 }
5244 }
5245 // Otherwise: Is one of the nodes associated with the current processor?
5246 else
5247 {
5248 // Can only have nodes if this is a finite element
5249 FiniteElement* finite_el_pt = dynamic_cast<FiniteElement*>(el_pt);
5250 if (finite_el_pt != 0)
5251 {
5252 unsigned n_node = finite_el_pt->nnode();
5253 for (unsigned n = 0; n < n_node; n++)
5254 {
5256
5257 // Keep element? (use stl find?)
5258 unsigned keep_it = false;
5259 for (std::set<unsigned>::iterator it =
5262 it++)
5263 {
5264 if (*it == unsigned(my_rank))
5265 {
5266 keep_it = true;
5267 // Break out of the loop over processors
5268 break;
5269 }
5270 }
5271
5272              // Add as a root halo element if keep_it=true
5273 if (keep_it)
5274 {
5275 // Add as root halo element whose non-halo counterpart is
5276 // located on processor el_domain
5277 if (!element_retained[e])
5278 {
5279 root_halo_element[el_domain].push_back(e);
5280 element_retained[e] = true;
5282 }
5283 // Now break out of loop over nodes
5284 break;
5285 }
5286 }
5287 }
5288 } // End of testing for halo by virtue of shared nodes
5289 } // End of halo element conditions
5290 } // end of loop over elements
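    // -----------------------------------------------------------------------
    // Illustrative summary (not part of the library): in outline, the loop
    // above retains element e with d = element_domain[e] on this processor if
    //
    //   if (d == my_rank)                                 // keep as non-halo
    //   else if (keep_all_elements_as_halos ||
    //            el_pt->must_be_kept_as_halo() ||
    //            some node of e is associated with my_rank)
    //                                                     // keep as root halo
    //
    // and in the halo cases it is additionally registered (once) in
    // root_halo_element[d], so that its non-halo counterpart on processor d
    // can later be matched up as the corresponding root haloed element.
    // -----------------------------------------------------------------------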
5291
5292    // First check that the number of elements is greater than zero; when
5293    // working with submeshes it is possible that some of them have
5294    // no elements (face element meshes), since those may have been
5295    // deleted in "Problem::actions_before_distribute()"
5296 if (nelem > 0)
5297 {
5298 // Check that we are working with a TriangleMeshBase mesh, if
5299 // that is the case then we need to create shared boundaries
5301 {
5302 // Creation of shared boundaries
5303 // ------------------------------
5304        // All processors share the same boundary id for the created
5305        // shared boundary. We need all the elements on all processors,
5306        // which is why this step is performed before the deletion of the
5307        // elements not associated with the current processor.
5308 // N.B.: This applies only to unstructured meshes
5309 this->create_shared_boundaries(comm_pt,
5315 } // if (is_a_triangle_mesh_base_mesh)
5316 } // if (nelem > 0)
5317
5318    // NOTE: No need to add an additional layer of halo elements.
5319    // The procedure for removing "overlooked" halo nodes in
5320    // classify_halo_and_haloed_nodes() deals
5321    // with the problem addressed here. [Code that dealt with this
5322    // problem at the distribution stage has been removed.]
5323
5324 // Store the finite element pointer version of the elements that are
5325 // about to be deleted, used to reset the boundary elements info
5327
5328 // Copy the elements associated with the actual
5329 // current processor into its own permanent storage.
5330 // Do it in the order in which the elements appeared originally
5332 for (unsigned e = 0; e < nelem; e++)
5333 {
5335 if (element_retained[e])
5336 {
5337 this->add_element_pt(el_pt);
5338 }
5339 else
5340 {
5341 // Flush the object attached to the tree for this element?
5343 if (ref_el_pt != 0)
5344 {
5345 ref_el_pt->tree_pt()->flush_object();
5346 }
5347
5348
5349 // Store pointer to the element that's about to be deleted.
5350
5351 // Only for structured meshes since this "deleted_element_pt"
5352        // vector is used in the Problem class to set a null pointer for
5353        // the deleted elements in the Base_mesh_element_pt structure
5355 {
5356 deleted_element_pt.push_back(el_pt);
5357 } // if (!is_a_triangle_mesh_base_mesh)
5358
5360 {
5361 // Store pointer to the finite element that's about to be deleted
5363 }
5364
5365 // Delete the element
5366 delete el_pt;
5367 }
5368 }
5369
5370 // Copy the root halo elements associated with the actual
5371 // current processor into its own permanent storage; the order
5372 // here is somewhat random but we compensate for that by
5373 // ensuring that the corresponding haloed elements are
5374 // added in the same order below
5375#ifdef PARANOID
5376 std::map<unsigned, bool> done;
5377#endif
5378 for (int d = 0; d < n_proc; d++)
5379 {
5381 for (unsigned e = 0; e < nelem; e++)
5382 {
5383 int number = root_halo_element[d][e];
5384 if (number >= 0)
5385 {
5386#ifdef PARANOID
5387 if (done[number])
5388 {
5389 std::ostringstream error_message;
5390 error_message << "Have already added element " << number
5391 << " as root halo element\n"
5392 << std::endl;
5393 throw OomphLibError(error_message.str(),
5396 }
5397 done[number] = true;
5398#endif
5399 this->add_root_halo_element_pt(d, backed_up_el_pt[number]);
5400 }
5401 }
5402 }
5403
5404
5405 // Now get root haloed elements: root_haloed_element[p][j] stores
5406 // the element number (in the order in which the elements are stored
5407    // in backed_up_el_pt) of the j-th root haloed element with processor
5408    // p. On proc my_proc this is the same as root_haloed_element[my_proc][j]
5409 // on processor p, so get the information by gatherv operations.
5411
5412 // Find out number of root halo elements with each other proc
5415 for (int p = 0; p < n_proc; p++)
5416 {
5418 }
5419
5420 // Each processor sends number of halo elements it has with processor
5421 // p to processor p where this information is stored in nhaloed[...]
5422 for (int p = 0; p < n_proc; p++)
5423 {
5424 // Gather the p-th entries in nhalo from every processor on
5425 // processor p and store them in nhaloed consecutively
5426 // starting at beginning
5427 MPI_Gather(
5428 &nhalo[p], 1, MPI_INT, &nhaloed[0], 1, MPI_INT, p, comm_pt->mpi_comm());
5429 }
5430
5431 // In the big sequence of concatenated root halo elements (enumerated
5432 // individually on the various processors) where do the root halo
5433 // elements from a given processor start? Also figure out how many
5434 // root haloed elements there are in total by summing up their numbers
5437 for (int i_proc = 0; i_proc < n_proc; i_proc++)
5438 {
5440 if (i_proc != 0)
5441 {
5444 }
5445 else
5446 {
5447 start_index[0] = 0;
5448 }
5449 }
5450
5451 // Storage for all root haloed elements from the various processors, one
5452 // after the other, with some padding from negative entries to avoid
5453 // zero length vectors
5455 0);
5456
5457 // Now send the ids of the relevant elements via gatherv
5458 for (int p = 0; p < n_proc; p++)
5459 {
5460      // Gather the p-th vector of root halo element numbers from every
5461      // processor on processor p, storing the contributions consecutively
5462      // (at the offsets held in start_index)
5463 MPI_Gatherv(&root_halo_element[p][0], // pointer to first entry in vector
5464 // to be gathered on processor p
5465 nhalo[p], // Number of entries to be sent
5466 MPI_INT,
5467 &all_root_haloed_element[0], // Target -- this will store
5468 // the element numbers of
5469 // all root haloed elements
5470 // received from other processors
5471 // in order
5472 &nhaloed[0], // Pointer to the vector containing the lengths
5473 // of the vectors received from elsewhere
5474 &start_index[0], // "offset" for storage of vector received
5475 // from various processors in the global
5476 // concatenated vector
5477 MPI_INT,
5478 p, // processor that gathers the information
5479 comm_pt->mpi_comm());
5480 }
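    // -----------------------------------------------------------------------
    // Illustrative sketch (not part of the library): MPI_Gatherv is used
    // above because each processor contributes a different number of element
    // ids. The per-source counts and the offsets into the concatenated target
    // vector are only significant on the gathering (root) processor. In
    // general form:
    //
    //   MPI_Gatherv(sendbuf, sendcount, MPI_INT,
    //               recvbuf, recvcounts, displs, MPI_INT,
    //               root, comm);
    //
    // where, in the call above, recvcounts corresponds to nhaloed and displs
    // to start_index.
    // -----------------------------------------------------------------------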
5481
5482
5483 // Determine root haloed elements
5484 //-------------------------------
5485
5486 // Loop over all other processors
5487 unsigned count = 0;
5488 for (int d = 0; d < n_proc; d++)
5489 {
5490#ifdef PARANOID
5491 std::map<unsigned, bool> done;
5492#endif
5493
5494 // Loop over root haloed elements with specified processor
5495 unsigned n = nhaloed[d];
5496 for (unsigned e = 0; e < n; e++)
5497 {
5498 int number = all_root_haloed_element[count];
5499 count++;
5500
5501 // Ignore padded -1s that were only inserted to avoid
5502 // zero sized vectors
5503 if (number >= 0)
5504 {
5505 // Get pointer to element
5507
5508 // Halo elements can't be haloed themselves
5509 if (!el_pt->is_halo())
5510 {
5511#ifdef PARANOID
5512 if (done[number])
5513 {
5514 std::ostringstream error_message;
5515 error_message << "Have already added element " << number
5516 << " as root haloed element\n"
5517 << std::endl;
5518 throw OomphLibError(error_message.str(),
5521 }
5522 done[number] = true;
5523#endif
5524
5525 // Current element is haloed by other processor
5527 }
5528 }
5529 }
5530 }
5531
5532
5533 // Doc stats
5534 if (report_stats)
5535 {
5536 oomph_info << "Processor " << my_rank << " holds " << this->nelement()
5537 << " elements of which " << this->nroot_halo_element()
5538 << " are root halo elements \n while "
5539 << this->nroot_haloed_element() << " are root haloed elements"
5540 << std::endl;
5541 }
5542
5543
5544 // Loop over all retained elements and mark their nodes
5545 //-----------------------------------------------------
5546 // as to be retained too (some double counting going on here)
5547 //-----------------------------------------------------------
5548 nelem = this->nelement();
5549 for (unsigned e = 0; e < nelem; e++)
5550 {
5552 dynamic_cast<FiniteElement*>(this->element_pt(e));
5553
5554 // If we have a finite element
5555 if (f_el_pt != 0)
5556 {
5557 // Loop over nodes
5558 unsigned nnod = f_el_pt->nnode();
5559 for (unsigned j = 0; j < nnod; j++)
5560 {
5562 nod_pt->set_non_obsolete();
5563 }
5564 }
5565 }
5566
5567
5568 // Now remove the pruned nodes
5569 this->prune_dead_nodes();
5570
5571
5572#ifdef OOMPH_HAS_TRIANGLE_LIB
5573 if (is_a_triangle_mesh_base_mesh)
5574 {
5575 triangle_mesh_pt->reset_boundary_element_info(
5579    } // if (is_a_triangle_mesh_base_mesh)
5580 else
5581 {
5582#endif // #ifdef OOMPH_HAS_TRIANGLE_LIB
5583
5584      // Temporarily set the mesh as distributed
5586
5587#ifdef OOMPH_HAS_TRIANGLE_LIB
5588 }
5589#endif
5590
5591 // Re-setup tree forest. (Call this every time even if
5592 // a (distributed) mesh has no elements on this processor.
5593 // We still need to participate in communication.)
5595 dynamic_cast<TreeBasedRefineableMeshBase*>(this);
5596 if (ref_mesh_pt != 0)
5597 {
5598 ref_mesh_pt->setup_tree_forest();
5599 }
5600
5601 // Classify nodes
5603
5604 // Doc?
5605 //-----
5606 if (doc_info.is_doc_enabled())
5607 {
5608 doc_mesh_distribution(doc_info);
5609 }
5610 }
5611
5612
5613 //========================================================================
5614 /// (Irreversibly) prune halo(ed) elements and nodes, usually
5615 /// after another round of refinement, to get rid of
5616 /// excessively wide halo layers. Note that the current
5617  /// mesh will now be regarded as the base mesh and no unrefinement
5618 /// relative to it will be possible once this function
5619 /// has been called.
5620 //========================================================================
5623 DocInfo& doc_info,
5624 const bool& report_stats)
5625 {
5626 // MemoryUsage::doc_memory_usage("at start of mesh-level prunes");
5627
5628
5629#ifdef OOMPH_HAS_MPI
5630 // Delete any external element storage before performing the redistribution
5631 // (in particular, external halo nodes that are on mesh boundaries)
5633#endif
5634
5636 dynamic_cast<TreeBasedRefineableMeshBase*>(this);
5637 if (ref_mesh_pt != 0)
5638 {
5639 // Storage for number of processors and current processor
5640 int n_proc = Comm_pt->nproc();
5641 int my_rank = Comm_pt->my_rank();
5642
5643 // Doc stats
5644 if (report_stats)
5645 {
5647 << "\n----------------------------------------------------\n";
5648 oomph_info << "Before pruning: Processor " << my_rank << " holds "
5649 << this->nelement() << " elements of which "
5650 << this->nroot_halo_element()
5651 << " are root halo elements \n while "
5652 << this->nroot_haloed_element()
5653 << " are root haloed elements" << std::endl;
5654
5655 // Report total number of halo(ed) and shared nodes for this process
5656 oomph_info << "Before pruning: Processor " << my_rank << " holds "
5657 << this->nnode() << " nodes of which " << this->nhalo_node()
5658 << " are halo nodes \n while " << this->nhaloed_node()
5659 << " are haloed nodes, and " << this->nshared_node()
5660 << " are shared nodes." << std::endl;
5661
5662 // Report number of halo(ed) and shared nodes with each domain
5663 // from the current process
5664 for (int iproc = 0; iproc < n_proc; iproc++)
5665 {
5666 oomph_info << "Before pruning: With process " << iproc
5667 << ", there are " << this->nhalo_node(iproc)
5668 << " halo nodes, and " << std::endl
5669 << this->nhaloed_node(iproc) << " haloed nodes, and "
5670 << this->nshared_node(iproc) << " shared nodes"
5671 << std::endl;
5672 }
5674 << "----------------------------------------------------\n\n";
5675 }
5676
5677
5678 double t_start = 0.0;
5680 {
5682 }
5683
5684 // Declare all nodes as obsolete. We'll
5685 // change this setting for all nodes that must be retained
5686 // further down
5687 unsigned nnod = this->nnode();
5688 for (unsigned j = 0; j < nnod; j++)
5689 {
5690 this->node_pt(j)->set_obsolete();
5691 }
5692
5693 // Backup pointers to elements in this mesh (they must be finite elements
5694      // because it's a refineable mesh)
5695 unsigned nelem = this->nelement();
5697 std::map<RefineableElement*, bool> keep_element;
5698 for (unsigned e = 0; e < nelem; e++)
5699 {
5701 }
5702
5703 // MemoryUsage::doc_memory_usage("after backed up elements");
5704
5705 // Get the min and max refinement level, and current refinement pattern
5706 unsigned min_ref = 0;
5707 unsigned max_ref = 0;
5708
5709 // Get min and max refinement level
5710 unsigned local_min_ref = 0;
5711 unsigned local_max_ref = 0;
5712 ref_mesh_pt->get_refinement_levels(local_min_ref, local_max_ref);
5713
5714 // Reconcile between processors: If (e.g. following distribution/pruning)
5715      // the mesh has no elements on this processor then ignore its
5716 // contribution to the poll of max/min refinement levels
5719
5720 if (nelem == 0)
5721 {
5724 }
5725
5726 int int_min_ref = 0;
5727 int int_max_ref = 0;
5728
5730 &int_min_ref,
5731 1,
5732 MPI_INT,
5733 MPI_MIN,
5734 Comm_pt->mpi_comm());
5736 &int_max_ref,
5737 1,
5738 MPI_INT,
5739 MPI_MAX,
5740 Comm_pt->mpi_comm());
5741
5744
5745 // Get refinement pattern
5747 ref_mesh_pt->get_refinement_pattern(current_refined);
5748
5749 // get_refinement_pattern refers to the elements at each level
5750 // that were refined when proceeding to the next level
5752
5753 // Bypass if no elements
5754 if (nelem == 0)
5755 {
5757 }
5758
5759 // Reconcile between processors
5760 int int_n_ref = 0;
5762 &local_n_ref, &int_n_ref, 1, MPI_INT, MPI_MAX, Comm_pt->mpi_comm());
5763 unsigned n_ref = int(int_n_ref);
5764
5765
5766 double t_end = 0.0;
5768 {
5771 << "Time for establishing refinement levels in "
5772 << " Mesh::prune_halo_elements_and_nodes() [includes comms]: "
5773 << t_end - t_start << std::endl;
5775 }
5776
5777 // MemoryUsage::doc_memory_usage("after establishing refinement levels");
5778
5779 // Bypass everything until next comms if no elements
5780 if (nelem > 0)
5781 {
5782 // Loop over all elements; keep those on the min refinement level
5783 // Need to go back to the level indicated by min_ref
5784 unsigned base_level = n_ref - (max_ref - min_ref);
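        // ---------------------------------------------------------------------
        // Illustrative worked example (not part of the library): if the
        // refinement pattern records n_ref = 5 levels and the elements
        // currently span levels min_ref = 3 to max_ref = 5, then
        //
        //   base_level = n_ref - (max_ref - min_ref) = 5 - (5 - 3) = 3,
        //
        // i.e. the pruned mesh is rebuilt from the elements at the minimum
        // refinement level present across all processors.
        // ---------------------------------------------------------------------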
5785
5786 // This is the new minimum uniform refinement level
5787 // relative to total unrefined base mesh
5788 ref_mesh_pt->uniform_refinement_level_when_pruned() = min_ref;
5789
5790 // Get the elements at the specified "base" refinement level
5792 ref_mesh_pt->get_elements_at_refinement_level(base_level,
5794
5795 // Loop over the elements at this level
5797 for (unsigned e = 0; e < n_base_el; e++)
5798 {
5799 // Extract correct element...
5801
5802
5803 // Check it exists
5804 if (ref_el_pt != 0)
5805 {
5806 // Keep all non-halo elements, remove excess halos
5808 {
5809 keep_element[ref_el_pt] = true;
5810
5811 // Loop over this non-halo element's nodes and retain them too
5812 unsigned nnod = ref_el_pt->nnode();
5813 for (unsigned j = 0; j < nnod; j++)
5814 {
5816 }
5817 }
5818 }
5819
5820 } // end loop over base level elements
5821 }
5822
5823 // Synchronise refinement level when pruned over all meshes even if they
5824      // were empty (in which case the uniform refinement level is still zero),
5825      // so go for the max
5826 unsigned n_unif = 0;
5827 unsigned n_unif_local =
5828 ref_mesh_pt->uniform_refinement_level_when_pruned();
5830 &n_unif_local, &n_unif, 1, MPI_UNSIGNED, MPI_MAX, Comm_pt->mpi_comm());
5831 ref_mesh_pt->uniform_refinement_level_when_pruned() = n_unif;
5832
5833
5834 t_end = 0.0;
5836 {
5839 << "Time for synchronising refinement levels in "
5840 << " Mesh::prune_halo_elements_and_nodes() [includes comms]: "
5841 << t_end - t_start << std::endl;
5843 }
5844
5845
5846 // MemoryUsage::doc_memory_usage("after synchronising refinement levels");
5847
5848
5849 // Now work on which "root" halo elements to keep at this level
5850 // Can't use the current set directly; however,
5851 // we know the refinement level of the current halo, so
5852 // it is possible to go from that backwards to find the "father
5853 // halo element" necessary to complete this step
5854
5855 // Temp map of vectors holding the pointers to the root halo elements
5856 std::map<unsigned, Vector<FiniteElement*>> tmp_root_halo_element_pt;
5857
5858 // Temp map of vectors holding the pointers to the root haloed elements
5859 std::map<unsigned, Vector<FiniteElement*>> tmp_root_haloed_element_pt;
5860
5861 // Make sure we only push back each element once
5862 std::map<unsigned, std::map<FiniteElement*, bool>>
5864
5865 // Map to store if a halo element survives
5866 std::map<FiniteElement*, bool> halo_element_is_retained;
5867
5868 for (int domain = 0; domain < n_proc; domain++)
5869 {
5870 // Get vector of halo elements with processor domain by copy operation
5872
5873 // Loop over halo elements associated with this adjacent domain
5874 unsigned nelem = halo_elem_pt.size();
5875 for (unsigned e = 0; e < nelem; e++)
5876 {
5877 // Get element
5879 dynamic_cast<RefineableElement*>(halo_elem_pt[e]);
5880
5881 // An element should only be kept if its refinement
5882 // level is the same as the minimum refinement level
5883 unsigned halo_el_level = ref_el_pt->refinement_level();
5884
5886 if (halo_el_level == min_ref)
5887 {
5888 // Already at the correct level
5889 el_pt = ref_el_pt;
5890 }
5891 else
5892 {
5893 // Need to go up the tree to the father element at min_ref
5895 ref_el_pt->get_father_at_refinement_level(min_ref, father_el_pt);
5897 }
5898
5899          // Now loop over nodes in the halo element and retain it as a
5900          // halo element if any of its nodes are shared with one of the
5901 // non-halo-elements that we've already identified earlier -- these
5902 // were set to non-obsolete above.
5903 unsigned nnod = el_pt->nnode();
5904 for (unsigned j = 0; j < nnod; j++)
5905 {
5906 // Node has been reclaimed by one of the non-halo elements above
5907 Node* nod_pt = el_pt->node_pt(j);
5908 if (!nod_pt->is_obsolete())
5909 {
5910 // Keep element and add it to preliminary storage for
5911 // halo elements associated with current neighbouring domain
5913 {
5916 }
5917 keep_element[el_pt] = true;
5919 break;
5920 }
5921 }
5922 }
5923 }
5924
5926 {
5928 oomph_info << "Time for setup of retention pattern in "
5929 << " Mesh::prune_halo_elements_and_nodes(): "
5930 << t_end - t_start << std::endl;
5932 }
5933
5934 // MemoryUsage::doc_memory_usage("after setup of retention pattern");
5935
5936 // Make sure everybody finishes this part
5937 MPI_Barrier(Comm_pt->mpi_comm());
5938
5939 // Now all processors have decided (independently) which of their
5940 // (to-be root) halo elements they wish to retain. Now we need to figure
5941 // out which of their elements are haloed and add them in the appropriate
5942      // order into the haloed element scheme. For this we exploit the fact that
5943 // the halo and haloed elements are accessed in the same order on
5944 // all processors!
5945
5946 // Identify haloed elements on domain d
5947 for (int d = 0; d < n_proc; d++)
5948 {
5949 // Loop over domains that halo this domain
5950 for (int dd = 0; dd < n_proc; dd++)
5951 {
5952          // Don't talk to yourself
5953 if (d != dd)
5954 {
5955 // If we're identifying my haloed elements:
5956 if (d == my_rank)
5957 {
5958 // Get vector all elements that are currently haloed by domain dd
5960 this->haloed_element_pt(dd));
5961
5962 // Create a vector of ints to indicate if the halo element
5963              // on processor dd was kept
5964 unsigned nelem = haloed_elem_pt.size();
5966
5967 // Receive this vector from processor dd
5968 if (nelem != 0)
5969 {
5971 MPI_Recv(&halo_kept[0],
5972 nelem,
5973 MPI_INT,
5974 dd,
5975 0,
5976 Comm_pt->mpi_comm(),
5977 &status);
5978
5979 // Classify haloed element accordingly
5980 for (unsigned e = 0; e < nelem; e++)
5981 {
5983 dynamic_cast<RefineableElement*>(haloed_elem_pt[e]);
5984
5985 // An element should only be kept if its refinement
5986 // level is the same as the minimum refinement level
5987 unsigned haloed_el_level = ref_el_pt->refinement_level();
5988
5989 // Go up the tree to the correct level
5991
5992 if (haloed_el_level == min_ref)
5993 {
5994 // Already at the correct level
5995 el_pt = ref_el_pt;
5996 }
5997 else
5998 {
5999 // Need to go up the tree to the father element at min_ref
6001 ref_el_pt->get_father_at_refinement_level(min_ref,
6002 father_el_pt);
6004 }
6005
6006 if (halo_kept[e] == 1)
6007 {
6008 // I am being haloed by processor dd
6009 // Only keep it if it's not already in the storage
6010 bool already_root_haloed = false;
6011 unsigned n_root_haloed =
6013 for (unsigned e_root = 0; e_root < n_root_haloed; e_root++)
6014 {
6016 {
6017 already_root_haloed = true;
6018 break;
6019 }
6020 }
6022 {
6024 }
6025 }
6026 }
6027 }
6028 }
6029 else
6030 {
6031 // If we're dealing with my halo elements:
6032 if (dd == my_rank)
6033 {
6034 // Find (current) halo elements on processor dd whose non-halo
6035 // is on processor d
6037 this->halo_element_pt(d));
6038
6039 // Create a vector of ints to indicate if the halo
6040 // element was kept
6041 unsigned nelem = halo_elem_pt.size();
6043 for (unsigned e = 0; e < nelem; e++)
6044 {
6046 dynamic_cast<RefineableElement*>(halo_elem_pt[e]);
6047
6048 // An element should only be kept if its refinement
6049 // level is the same as the minimum refinement level
6050 unsigned halo_el_level = ref_el_pt->refinement_level();
6051
6052 // Go up the tree to the correct level
6054 if (halo_el_level == min_ref)
6055 {
6056 // Already at the correct level
6057 el_pt = ref_el_pt;
6058 }
6059 else
6060 {
6061 // Need to go up the tree to the father element at min_ref
6063 ref_el_pt->get_father_at_refinement_level(min_ref,
6064 father_el_pt);
6066 }
6067
6069 {
6070 halo_kept[e] = 1;
6071 }
6072 }
6073
6074 // Now send this vector to processor d to tell it which of
6075 // the haloed elements (which are listed in the same order)
6076 // are to be retained as haloed elements.
6077 if (nelem != 0)
6078 {
6079 MPI_Send(
6080 &halo_kept[0], nelem, MPI_INT, d, 0, Comm_pt->mpi_comm());
6081 }
6082 }
6083 }
6084 }
6085 }
6086 }
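      // ---------------------------------------------------------------------
      // Illustrative summary (not part of the library): the nested loops
      // above implement a simple point-to-point handshake. For each processor
      // pair (d, dd) the halo side (dd) walks its halo elements with d in
      // their stored order, records a 1/0 "kept" flag per element and sends
      // the flag vector; the haloed side (d) walks its haloed elements with
      // dd in the same order, receives the flags and promotes exactly the
      // flagged elements to root haloed status. Schematically:
      //
      //   halo side:    halo_kept[e] = retained ? 1 : 0;  MPI_Send(halo_kept...)
      //   haloed side:  MPI_Recv(halo_kept...);  if (halo_kept[e]) add as root haloed
      // ---------------------------------------------------------------------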
6087
6089 {
6091 oomph_info << "Time for pt2pt comms of retention pattern in "
6092 << " Mesh::prune_halo_elements_and_nodes(): "
6093 << t_end - t_start << std::endl;
6095 }
6096
6097 // MemoryUsage::doc_memory_usage("after pt2pt comms of retention
6098 // pattern");
6099
6100
6101 // Backup pointers to nodes in this mesh
6102 nnod = this->nnode();
6104 for (unsigned j = 0; j < nnod; j++)
6105 {
6106 backed_up_nod_pt[j] = this->node_pt(j);
6107 }
6108
6109 // Flush the mesh storage
6111
6112 // Storage for non-active trees/elements that are to be deleted
6113 std::set<Tree*> trees_to_be_deleted_pt;
6114
6115 // Loop over all backed-up elements
6117 for (unsigned e = 0; e < nelem; e++)
6118 {
6120 dynamic_cast<RefineableElement*>(backed_up_el_pt[e]);
6121
6122 // Get refinement level
6123 unsigned level = ref_el_pt->refinement_level();
6124
6125 // Go up the tree to the correct level
6127
6128 if (level == min_ref)
6129 {
6130 // Already at the correct level
6131 el_pt = ref_el_pt;
6132 }
6133 else
6134 {
6135 // Need to go up the tree to the father element at min_ref
6137 ref_el_pt->get_father_at_refinement_level(min_ref, father_el_pt);
6139 }
6140
6141 // If the base element is going to be kept, then add the current element
6142 // to the "new" mesh
6143 if (keep_element[el_pt])
6144 {
6145 this->add_element_pt(ref_el_pt);
6146 }
6147 else
6148 {
6149 // Flush the object attached to the tree for this element?
6151 dynamic_cast<RefineableElement*>(ref_el_pt);
6152 if (my_el_pt != 0)
6153 {
6154 my_el_pt->tree_pt()->flush_object();
6155 }
6156
6157 // Get associated tree root
6158 Tree* tmp_tree_root_pt = my_el_pt->tree_pt()->root_pt();
6159
6160 // Get all the tree nodes
6162 tmp_tree_root_pt->stick_all_tree_nodes_into_vector(
6164
6165 // Loop over all of them and delete associated object/
6166 // and flush any record of it in tree
6167 unsigned n_tree = tmp_all_tree_nodes_pt.size();
6168 for (unsigned j = 0; j < n_tree; j++)
6169 {
6170 if (tmp_all_tree_nodes_pt[j]->object_pt() != 0)
6171 {
6172 unsigned lev =
6173 tmp_all_tree_nodes_pt[j]->object_pt()->refinement_level();
6174 if (lev <= min_ref)
6175 {
6176 if (!keep_element[tmp_all_tree_nodes_pt[j]->object_pt()])
6177 {
6179 }
6180 }
6181 }
6182 }
6183
6184 // Delete the element
6185 deleted_element_pt.push_back(ref_el_pt);
6186 delete ref_el_pt;
6187 }
6188 }
6189
6190 // MemoryUsage::doc_memory_usage("before deleting superfluous elements");
6191
6192 // Delete superfluous elements
6193 for (std::set<Tree*>::iterator it = trees_to_be_deleted_pt.begin();
6194 it != trees_to_be_deleted_pt.end();
6195 it++)
6196 {
6197 Tree* tree_pt = (*it);
6198 if (tree_pt->object_pt() != 0)
6199 {
6200 deleted_element_pt.push_back(tree_pt->object_pt());
6201 delete tree_pt->object_pt();
6202 tree_pt->flush_object();
6203 }
6204 }
6205
6206 // MemoryUsage::doc_memory_usage("after deleting superfluous elements");
6207
6208
6209 // Wipe the storage scheme for (root) halo(ed) elements and then re-assign
6210 Root_haloed_element_pt.clear();
6211 Root_halo_element_pt.clear();
6212 for (int domain = 0; domain < n_proc; domain++)
6213 {
6215 for (unsigned e = 0; e < nelem; e++)
6216 {
6217 Root_halo_element_pt[domain].push_back(
6219 }
6220
6222 for (unsigned e = 0; e < nelem; e++)
6223 {
6224 Root_haloed_element_pt[domain].push_back(
6226 }
6227 }
6228
6229 // MemoryUsage::doc_memory_usage(
6230 // "after wiping storage scheme for root halo/ed els");
6231
6232 // Loop over all retained elements at this level and mark their nodes
6233 //-------------------------------------------------------------------
6234 // as to be retained too (some double counting going on here)
6235 //-----------------------------------------------------------
6236 nelem = this->nelement();
6237 for (unsigned e = 0; e < nelem; e++)
6238 {
6239 FiniteElement* el_pt = this->finite_element_pt(e);
6240
6241 // Loop over nodes
6242 unsigned nnod = el_pt->nnode();
6243 for (unsigned j = 0; j < nnod; j++)
6244 {
6245 Node* nod_pt = el_pt->node_pt(j);
6246 nod_pt->set_non_obsolete();
6247 }
6248 }
6249
6250 // Complete rebuild of mesh by adding retained nodes
6251 // Note that they are added in the order in which they
6252 // occurred in the original mesh as this guarantees that the
6253 // serialised access to halo and haloed nodes from different
6254 // processors remains synchronised.
6255 nnod = backed_up_nod_pt.size();
6256 for (unsigned j = 0; j < nnod; j++)
6257 {
6258 Node* nod_pt = backed_up_nod_pt[j];
6259 if (!nod_pt->is_obsolete())
6260 {
6261 // Not obsolete so add it back to the mesh
6262 this->add_node_pt(nod_pt);
6263 }
6264 }
6265
6266 // MemoryUsage::doc_memory_usage("after adding nodes back in");
6267
6268 // Prune and rebuild mesh
6269 //-----------------------
6270
6271 // Now remove the pruned nodes from the boundary lookup scheme
6272 this->prune_dead_nodes();
6273
6274 // MemoryUsage::doc_memory_usage("after prune dead nodes");
6275
6276 // And finally re-setup the boundary lookup scheme for elements
6277 this->setup_boundary_element_info();
6278
6279 // MemoryUsage::doc_memory_usage("after setup boundary info");
6280
6281 // Re-setup tree forest if needed. (Call this every time even if
6282 // a (distributed) mesh has no elements on this processor.
6283 // We still need to participate in communication.)
6284 TreeBasedRefineableMeshBase* ref_mesh_pt =
6285 dynamic_cast<TreeBasedRefineableMeshBase*>(this);
6286 if (ref_mesh_pt != 0)
6287 {
6288 ref_mesh_pt->setup_tree_forest();
6289 }
6290
6291 // MemoryUsage::doc_memory_usage("after setup tree forest");
6292
6293 if (Global_timings::Doc_comprehensive_timings)
6294 {
6295 t_end = TimingHelpers::timer();
6296 oomph_info
6297 << "Time for local rebuild of mesh from retained elements in "
6298 << " Mesh::prune_halo_elements_and_nodes(): " << t_end - t_start
6299 << std::endl;
6300 t_start = TimingHelpers::timer();
6301 }
6302
6303 // Classify nodes
6304 classify_halo_and_haloed_nodes(doc_info, report_stats);
6305
6306 if (Global_timings::Doc_comprehensive_timings)
6307 {
6308 t_end = TimingHelpers::timer();
6309 oomph_info << "Time for Mesh::classify_halo_and_haloed_nodes() "
6310 << " Mesh::prune_halo_elements_and_nodes(): "
6311 << t_end - t_start << std::endl;
6312 t_start = TimingHelpers::timer();
6313 }
6314
6315 // MemoryUsage::doc_memory_usage("after
6316 // classify_halo_and_haloed_nodes()");
6317
6318 // Doc?
6319 //-----
6320 if (doc_info.is_doc_enabled())
6321 {
6322 oomph_info << "Outputting distribution in " << doc_info.directory()
6323 << " " << doc_info.number() << std::endl;
6324 doc_mesh_distribution(doc_info);
6325 }
6326
6327 // Finally: Reorder the nodes within the mesh's node vector
6328 // to establish a standard ordering regardless of the sequence
6329 // of mesh refinements -- this is required to allow dump/restart
6330 // on refined meshes
6331 this->reorder_nodes();
6332
6333
6334 if (Global_timings::Doc_comprehensive_timings)
6335 {
6336 t_end = TimingHelpers::timer();
6337 oomph_info << "Time for Mesh::reorder_nodes() "
6338 << " Mesh::prune_halo_elements_and_nodes(): "
6339 << t_end - t_start << std::endl;
6340 t_start = TimingHelpers::timer();
6341 }
6342
6343 // MemoryUsage::doc_memory_usage("after reorder nodes");
6344
6345 // Doc stats
6346 if (report_stats)
6347 {
6348 oomph_info
6349 << "\n----------------------------------------------------\n";
6350 oomph_info << "After pruning: Processor " << my_rank << " holds "
6351 << this->nelement() << " elements of which "
6352 << this->nroot_halo_element()
6353 << " are root halo elements \n while "
6354 << this->nroot_haloed_element()
6355 << " are root haloed elements" << std::endl;
6356
6357 // Report total number of halo(ed) and shared nodes for this process
6358 oomph_info << "After pruning: Processor " << my_rank << " holds "
6359 << this->nnode() << " nodes of which " << this->nhalo_node()
6360 << " are halo nodes \n while " << this->nhaloed_node()
6361 << " are haloed nodes, and " << this->nshared_node()
6362 << " are shared nodes." << std::endl;
6363
6364 // Report number of halo(ed) and shared nodes with each domain
6365 // from the current process
6366 for (int iproc = 0; iproc < n_proc; iproc++)
6367 {
6368 oomph_info << "After pruning: With process " << iproc
6369 << ", there are " << this->nhalo_node(iproc)
6370 << " halo nodes, and " << std::endl
6371 << this->nhaloed_node(iproc) << " haloed nodes, and "
6372 << this->nshared_node(iproc) << " shared nodes"
6373 << std::endl;
6374 }
6375 oomph_info
6376 << "----------------------------------------------------\n\n";
6377 }
6378 }
6379
6380 // MemoryUsage::doc_memory_usage("end of mesh level prune");
6381 }
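// Summary of the mesh-level pruning performed above: (1) exchange the
// retention pattern for halo(ed) elements with all other processors,
// (2) rebuild the local mesh from the retained elements and nodes,
// (3) re-generate the boundary lookup schemes and the tree forest,
// (4) re-classify the halo and haloed nodes, (5) reorder the nodes into
// a refinement-independent order (required for dump/restart), and
// (6) optionally document the new distribution and its statistics.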
6382
6383
6384 //========================================================================
6385 /// Get efficiency of mesh distribution: In an ideal distribution
6386 /// without halo overhead, each processor would only hold its own
6387 /// elements. Efficiency per processor = (number of non-halo elements)/
6388 /// (total number of elements).
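/// (Worked example: a processor that holds 100 elements, 20 of which
/// are halo elements, has an efficiency of (100-20)/100 = 0.8.)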
6389 //========================================================================
6390 void Mesh::get_efficiency_of_mesh_distribution(double& av_efficiency,
6391 double& max_efficiency,
6392 double& min_efficiency)
6393 {
6394 // Storage for number of processors and current processor
6395 int n_proc = Comm_pt->nproc();
6396 int my_rank = Comm_pt->my_rank();
6397
6398 // Create vector to hold number of elements and halo elements
6399 Vector<int> nhalo_elements(n_proc, 0);
6400 Vector<int> n_elements(n_proc, 0);
6401
6402 // Count total number of halo elements
6403 unsigned count = 0;
6404 for (int d = 0; d < n_proc; d++)
6405 {
6407 count += halo_elem_pt.size();
6408 }
6409
6410 // Stick own number into appropriate entry
6412 int n_element_local = nelement();
6413
6414 // Gather information on root processor: First argument group
6415 // specifies what is to be sent (one int from each processor, indicating
6416 // the number of elements on it), the second group indicates where
6417 // the results are to be gathered (in rank order) on root processor.
6419 1,
6420 MPI_INT,
6421 &nhalo_elements[0],
6422 1,
6423 MPI_INT,
6424 0,
6425 Comm_pt->mpi_comm());
6427 1,
6428 MPI_INT,
6429 &n_elements[0],
6430 1,
6431 MPI_INT,
6432 0,
6433 Comm_pt->mpi_comm());
6434
6435 // Initialise stats
6436 av_efficiency = 0.0;
6437 double max = -1.0;
6438 double min = 1000000000.0;
6439
6440 if (my_rank == 0)
6441 {
6442 for (int i = 0; i < n_proc; i++)
6443 {
6444 unsigned nel = n_elements[i];
6445 double eff = 0.0;
6446 if (nel != 0)
6447 {
6448 eff = double(nel - nhalo_elements[i]) / double(nel);
6449 }
6450 av_efficiency += eff;
6451 if (eff > max) max = eff;
6452 if (eff < min) min = eff;
6453 }
6454 av_efficiency /= double(n_proc);
6455 }
6456
6457 // Now broadcast the result back out
6458 MPI_Bcast(&max, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
6459 MPI_Bcast(&min, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
6460 MPI_Bcast(&av_efficiency, 1, MPI_DOUBLE, 0, Comm_pt->mpi_comm());
6461
6462 max_efficiency = max;
6463 min_efficiency = min;
6464 }
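// Note: the element counts are gathered onto the root processor, the
// efficiency statistics are computed there and the three results are
// then broadcast, so every processor returns identical values.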
6465
6466
6467 //========================================================================
6468 /// Doc the mesh distribution
6469 //========================================================================
6470 void Mesh::doc_mesh_distribution(DocInfo& doc_info)
6471 {
6472 // Storage for current processor and number of processors
6473 int my_rank = Comm_pt->my_rank();
6474 int n_proc = Comm_pt->nproc();
6475
6476 std::ostringstream filename;
6477 std::ostringstream filename2;
6478 std::ofstream some_file;
6479 std::ofstream some_file2;
6480
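// The blocks below write one file (or one file per neighbouring domain)
// for each category of mesh data: all elements, non-halo elements,
// halo/haloed elements, all/non-halo/solid nodes, halo/haloed/shared
// nodes, the mesh itself, its boundaries and, where the lookup scheme
// has been set up, the elements adjacent to each boundary. File names
// are built from the DocInfo directory, label and number plus the
// processor rank.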
6481 // Doc elements on this processor
6482 filename << doc_info.directory() << "/" << doc_info.label()
6483 << "elements_on_proc" << my_rank << "_" << doc_info.number()
6484 << ".dat";
6485 some_file.open(filename.str().c_str());
6486 this->output(some_file, 5);
6487 some_file.close();
6488
6489 // Doc non-halo elements on this processor
6490 filename.str("");
6491 filename << doc_info.directory() << "/" << doc_info.label()
6492 << "non_halo_elements_on_proc" << my_rank << "_"
6493 << doc_info.number() << ".dat";
6494 some_file.open(filename.str().c_str());
6495
6496 // Get to elements on processor
6497 unsigned nelem = this->nelement();
6498 for (unsigned e = 0; e < nelem; e++)
6499 {
6500 // Get the element
6501 GeneralisedElement* el_pt = this->element_pt(e);
6502 // Only output if not a halo element
6503 if (!el_pt->is_halo())
6504 {
6505 FiniteElement* f_el_pt = dynamic_cast<FiniteElement*>(el_pt);
6506 // Indicate a generalised element
6507 if (f_el_pt == 0)
6508 {
6509 some_file << "Generalised Element " << e << "\n";
6510 }
6511 else
6512 // output if non-halo and a finite element
6513 {
6514 f_el_pt->output(some_file, 5);
6515 }
6516 }
6517 }
6518
6519 some_file.close();
6520
6521
6522 // Doc halo elements on this processor
6523 filename.str("");
6524 filename << doc_info.directory() << "/" << doc_info.label()
6525 << "halo_elements_on_proc" << my_rank << "_" << doc_info.number()
6526 << ".dat";
6527 some_file.open(filename.str().c_str());
6528 for (int domain = 0; domain < n_proc; domain++)
6529 {
6530 filename2.str("");
6531 filename2 << doc_info.directory() << "/" << doc_info.label()
6532 << "halo_elements_with_proc" << domain << "_on_proc" << my_rank
6533 << "_" << doc_info.number() << ".dat";
6534 some_file2.open(filename2.str().c_str());
6535
6536 // Get vector of halo elements by copy operation
6537 Vector<GeneralisedElement*> halo_elem_pt(this->halo_element_pt(domain));
6538 unsigned nelem = halo_elem_pt.size();
6539 for (unsigned e = 0; e < nelem; e++)
6540 {
6541 FiniteElement* f_el_pt = dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
6542 if (f_el_pt != 0)
6543 {
6544#ifdef PARANOID
6545 // Check if it's refineable and if so if it's a leaf.
6546 // If not something must have gone wrong.
6547 RefineableElement* ref_el_pt =
6548 dynamic_cast<RefineableElement*>(f_el_pt);
6549 if (ref_el_pt != 0)
6550 {
6551 if (!(ref_el_pt->tree_pt()->is_leaf()))
6552 {
6553 std::ostringstream error_message;
6554 error_message
6555 << "Haloed element is not a leaf element. This shouldn't happen"
6556 << std::endl;
6557 error_message << "Here are the nodal positions: " << std::endl;
6558 unsigned nnod = ref_el_pt->nnode();
6559 for (unsigned j = 0; j < nnod; j++)
6560 {
6561 Node* nod_pt = ref_el_pt->node_pt(j);
6562 unsigned n_dim = nod_pt->ndim();
6563 for (unsigned i = 0; i < n_dim; i++)
6564 {
6565 error_message << nod_pt->x(i) << " ";
6566 }
6567 error_message << "\n";
6568 throw OomphLibError(error_message.str(),
6569 OOMPH_CURRENT_FUNCTION,
6570 OOMPH_EXCEPTION_LOCATION);
6571 }
6572 }
6573 }
6574#endif
6575
6576 f_el_pt->output(some_file, 5);
6577 f_el_pt->output(some_file2, 5);
6578 }
6579 // Indicate a generalised element
6580 else
6581 {
6582 some_file << "Generalised Element " << e << "\n";
6583 some_file2 << "Generalised Element " << e << "\n";
6584 }
6585 }
6586 some_file2.close();
6587 }
6588 some_file.close();
6589
6590 // Doc haloed elements on this processor
6591 filename.str("");
6592 filename << doc_info.directory() << "/" << doc_info.label()
6593 << "haloed_elements_on_proc" << my_rank << "_" << doc_info.number()
6594 << ".dat";
6595 some_file.open(filename.str().c_str());
6596 for (int domain = 0; domain < n_proc; domain++)
6597 {
6598 filename2.str("");
6599 filename2 << doc_info.directory() << "/" << doc_info.label()
6600 << "haloed_elements_with_proc" << domain << "_on_proc"
6601 << my_rank << "_" << doc_info.number() << ".dat";
6602 some_file2.open(filename2.str().c_str());
6603
6604 // Get vector of haloed elements by copy operation
6605 Vector<GeneralisedElement*> haloed_elem_pt(
6606 this->haloed_element_pt(domain));
6607 unsigned nelem = haloed_elem_pt.size();
6608 for (unsigned e = 0; e < nelem; e++)
6609 {
6610 // Is it a finite element
6611 FiniteElement* finite_el_pt =
6612 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
6613 if (finite_el_pt != 0)
6614 {
6615#ifdef PARANOID
6616 // Check if it's refineable and if so if it's a leaf.
6617 // If not something must have gone wrong.
6618 RefineableElement* ref_el_pt =
6619 dynamic_cast<RefineableElement*>(finite_el_pt);
6620 if (ref_el_pt != 0)
6621 {
6622 if (!(ref_el_pt->tree_pt()->is_leaf()))
6623 {
6624 std::ostringstream error_message;
6625 error_message
6626 << "Haloed element is not a leaf element. This shouldn't happen"
6627 << std::endl;
6628 error_message << "Here are the nodal positions: " << std::endl;
6629 unsigned nnod = ref_el_pt->nnode();
6630 for (unsigned j = 0; j < nnod; j++)
6631 {
6632 Node* nod_pt = ref_el_pt->node_pt(j);
6633 unsigned n_dim = nod_pt->ndim();
6634 for (unsigned i = 0; i < n_dim; i++)
6635 {
6636 error_message << nod_pt->x(i) << " ";
6637 }
6638 error_message << "\n";
6639 throw OomphLibError(error_message.str(),
6640 OOMPH_CURRENT_FUNCTION,
6641 OOMPH_EXCEPTION_LOCATION);
6642 }
6643 }
6644 }
6645#endif
6646
6647 finite_el_pt->output(some_file, 5);
6648 finite_el_pt->output(some_file2, 5);
6649 }
6650 // Indicate a generalised element
6651 else
6652 {
6653 some_file << "Generalised Element " << e << "\n";
6654 some_file2 << "Generalised Element " << e << "\n";
6655 }
6656 }
6657 some_file2.close();
6658 }
6659 some_file.close();
6660
6661
6662 // Doc non-halo nodes on this processor
6663 filename.str("");
6664 filename << doc_info.directory() << "/" << doc_info.label()
6665 << "non_halo_nodes_on_proc" << my_rank << "_" << doc_info.number()
6666 << ".dat";
6667 some_file.open(filename.str().c_str());
6668 unsigned nnod = this->nnode();
6669 for (unsigned j = 0; j < nnod; j++)
6670 {
6671 Node* nod_pt = this->node_pt(j);
6672 if (!nod_pt->is_halo())
6673 {
6674 unsigned ndim = nod_pt->ndim();
6675 for (unsigned i = 0; i < ndim; i++)
6676 {
6677 some_file << nod_pt->x(i) << " ";
6678 }
6679 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6680 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6681 << std::endl;
6682 }
6683 }
6684 some_file.close();
6685
6686
6687 // Doc nodes on this processor
6688 filename.str("");
6689 filename << doc_info.directory() << "/" << doc_info.label()
6690 << "nodes_on_proc" << my_rank << "_" << doc_info.number()
6691 << ".dat";
6692 some_file.open(filename.str().c_str());
6693 for (unsigned j = 0; j < nnod; j++)
6694 {
6695 Node* nod_pt = this->node_pt(j);
6696 unsigned ndim = nod_pt->ndim();
6697 for (unsigned i = 0; i < ndim; i++)
6698 {
6699 some_file << nod_pt->x(i) << " ";
6700 }
6701 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID() << " "
6702 << nod_pt->is_halo() << " " << nod_pt->hang_code() << std::endl;
6703 }
6704 some_file.close();
6705
6706 // Doc solid nodes on this processor
6707 filename.str("");
6708 filename << doc_info.directory() << "/" << doc_info.label()
6709 << "solid_nodes_on_proc" << my_rank << "_" << doc_info.number()
6710 << ".dat";
6711 some_file.open(filename.str().c_str());
6712 unsigned nsnod = this->nnode();
6713 for (unsigned j = 0; j < nsnod; j++)
6714 {
6715 Node* nod_pt = this->node_pt(j);
6716 SolidNode* solid_nod_pt = dynamic_cast<SolidNode*>(nod_pt);
6717 if (solid_nod_pt != 0)
6718 {
6719 unsigned ndim = solid_nod_pt->ndim();
6720 for (unsigned i = 0; i < ndim; i++)
6721 {
6722 some_file << nod_pt->x(i) << " ";
6723 }
6724 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6725 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6726 << "\n";
6727 }
6728 }
6729 some_file.close();
6730
6731
6732 // Doc halo nodes on this processor
6733 filename.str("");
6734 filename << doc_info.directory() << "/" << doc_info.label()
6735 << "halo_nodes_on_proc" << my_rank << "_" << doc_info.number()
6736 << ".dat";
6737 some_file.open(filename.str().c_str());
6738 for (int domain = 0; domain < n_proc; domain++)
6739 {
6740 filename2.str("");
6741 filename2 << doc_info.directory() << "/" << doc_info.label()
6742 << "halo_nodes_with_proc" << domain << "_on_proc" << my_rank
6743 << "_" << doc_info.number() << ".dat";
6744 some_file2.open(filename2.str().c_str());
6745 unsigned nnod = this->nhalo_node(domain);
6746 for (unsigned j = 0; j < nnod; j++)
6747 {
6748 Node* nod_pt = this->halo_node_pt(domain, j);
6749 unsigned ndim = nod_pt->ndim();
6750 for (unsigned i = 0; i < ndim; i++)
6751 {
6752 some_file << nod_pt->x(i) << " ";
6753 some_file2 << nod_pt->x(i) << " ";
6754 }
6755 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6756 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6757 << "\n";
6758 some_file2 << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6759 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6760 << "\n";
6761 }
6762 some_file2.close();
6763 }
6764 some_file.close();
6765
6766
6767 // Doc haloed nodes on this processor
6768 filename.str("");
6769 filename << doc_info.directory() << "/" << doc_info.label()
6770 << "haloed_nodes_on_proc" << my_rank << "_" << doc_info.number()
6771 << ".dat";
6772 some_file.open(filename.str().c_str());
6773 for (int domain = 0; domain < n_proc; domain++)
6774 {
6775 filename2.str("");
6776 filename2 << doc_info.directory() << "/" << doc_info.label()
6777 << "haloed_nodes_with_proc" << domain << "_on_proc" << my_rank
6778 << "_" << doc_info.number() << ".dat";
6779 some_file2.open(filename2.str().c_str());
6780 unsigned nnod = this->nhaloed_node(domain);
6781 for (unsigned j = 0; j < nnod; j++)
6782 {
6783 Node* nod_pt = this->haloed_node_pt(domain, j);
6784 unsigned ndim = nod_pt->ndim();
6785 for (unsigned i = 0; i < ndim; i++)
6786 {
6787 some_file << nod_pt->x(i) << " ";
6788 some_file2 << nod_pt->x(i) << " ";
6789 }
6790 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6791 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6792 << "\n";
6793 some_file2 << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6794 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6795 << "\n";
6796 }
6797 some_file2.close();
6798 }
6799 some_file.close();
6800
6801
6802 // Doc shared nodes on this processor
6803 filename.str("");
6804 filename << doc_info.directory() << "/" << doc_info.label()
6805 << "shared_nodes_on_proc" << my_rank << "_" << doc_info.number()
6806 << ".dat";
6807 some_file.open(filename.str().c_str());
6808 for (int domain = 0; domain < n_proc; domain++)
6809 {
6810 filename2.str("");
6811 filename2 << doc_info.directory() << "/" << doc_info.label()
6812 << "shared_nodes_with_proc" << domain << "_on_proc" << my_rank
6813 << "_" << doc_info.number() << ".dat";
6814 some_file2.open(filename2.str().c_str());
6815 unsigned nnod = this->nshared_node(domain);
6816 for (unsigned j = 0; j < nnod; j++)
6817 {
6818 Node* nod_pt = this->shared_node_pt(domain, j);
6819 unsigned ndim = nod_pt->ndim();
6820 for (unsigned i = 0; i < ndim; i++)
6821 {
6822 some_file << nod_pt->x(i) << " ";
6823 some_file2 << nod_pt->x(i) << " ";
6824 }
6825 some_file << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6826 << " " << nod_pt->is_halo() << "\n";
6827 some_file2 << nod_pt->nvalue() << " " << nod_pt->non_halo_proc_ID()
6828 << " " << nod_pt->is_halo() << " " << nod_pt->hang_code()
6829 << "\n";
6830 }
6831 some_file2.close();
6832 }
6833 some_file.close();
6834
6835
6836 // Doc mesh
6837 filename.str("");
6838 filename << doc_info.directory() << "/" << doc_info.label() << "mesh"
6839 << my_rank << "_" << doc_info.number() << ".dat";
6840 some_file.open(filename.str().c_str());
6841 this->output(some_file, 5);
6842 some_file.close();
6843
6844
6845 // Doc boundary scheme
6846 filename.str("");
6847 filename << doc_info.directory() << "/" << doc_info.label() << "boundaries"
6848 << my_rank << "_" << doc_info.number() << ".dat";
6849 some_file.open(filename.str().c_str());
6850 this->output_boundaries(some_file);
6851 some_file.close();
6852
6853
6854 // Doc elements next to boundaries scheme
6855 // if set up
6857 {
6858 // How many finite elements are adjacent to boundary b?
6859 unsigned nbound = this->nboundary();
6860 for (unsigned b = 0; b < nbound; b++)
6861 {
6862 filename.str("");
6863 filename << doc_info.directory() << "/" << doc_info.label()
6864 << "boundary_elements" << my_rank << "_" << b << "_"
6865 << doc_info.number() << ".dat";
6866 some_file.open(filename.str().c_str());
6867 unsigned nelem = this->nboundary_element(b);
6868 for (unsigned e = 0; e < nelem; e++)
6869 {
6870 this->boundary_element_pt(b, e)->output(some_file, 5);
6871 }
6872 some_file.close();
6873 }
6874 }
6875 }
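// Illustrative usage (a sketch only; mesh_pt and the DocInfo set-up
// shown here are assumptions, not taken from this file):
//
//   DocInfo doc_info;
//   doc_info.set_directory("RESLT"); // directory used in the file names
//   doc_info.number() = 0;           // counter appended to the file names
//   mesh_pt->doc_mesh_distribution(doc_info);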
6876
6877
6878 //========================================================================
6879 /// Check the halo/haloed/shared node/element schemes on the Mesh
6880 //========================================================================
6881 void Mesh::check_halo_schemes(DocInfo& doc_info,
6882 double& max_permitted_error_for_halo_check)
6883 {
6884 oomph_info << "Doing check_halo_schemes for mesh: " << typeid(*this).name()
6885 << std::endl;
6886
6887 // Moved this from the Problem class so that it would work better
6888 // in multiple mesh problems; there remains a simple "wrapper"
6889 // function in the Problem class that calls this for each (sub)mesh.
6890
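// The checks below run in this order: (1) the shared node lookup
// schemes (duplicate entries and matching coordinates across processor
// pairs), (2) the halo/haloed element lookup schemes (element counts,
// nodal positions, numbers of values and hanging-node data), (3) the
// halo/haloed node lookup schemes and (4) the external halo/haloed
// element and node schemes. Each stage can be documented to file via
// the DocInfo object.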
6891 MPI_Status status;
6892 std::ostringstream filename;
6893 std::ofstream shared_file;
6894 std::ofstream halo_file;
6895 std::ofstream haloed_file;
6896 std::ofstream ext_halo_file;
6897 std::ofstream ext_haloed_file;
6898
6899 // Storage for current processor and number of processors
6900 int n_proc = Comm_pt->nproc();
6901 int my_rank = Comm_pt->my_rank();
6902
6903
6904 // Check the shared node scheme first: if this is incorrect then
6905 // the halo(ed) node scheme is likely to be wrong too
6906
6907 // Doc shared nodes lookup schemes
6908 //-------------------------------------
6909 if (doc_info.is_doc_enabled())
6910 {
6911 // Loop over domains for shared nodes
6912 for (int dd = 0; dd < n_proc; dd++)
6913 {
6914 filename.str("");
6915 filename << doc_info.directory() << "/shared_node_check"
6916 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
6917 << dd << "_" << doc_info.number() << ".dat";
6918 shared_file.open(filename.str().c_str());
6919 shared_file << "ZONE " << std::endl;
6920
6921 unsigned nnod = nshared_node(dd);
6922 for (unsigned j = 0; j < nnod; j++)
6923 {
6924 Node* nod_pt = shared_node_pt(dd, j);
6925 unsigned ndim = nod_pt->ndim();
6926 for (unsigned i = 0; i < ndim; i++)
6927 {
6928 shared_file << nod_pt->position(i) << " ";
6929 }
6930 shared_file << j << " " << nod_pt << std::endl;
6931 }
6932 // Dummy output for processor that doesn't share nodes
6933 // (needed for tecplot)
6934 if ((nnod == 0) && (nelement() != 0))
6935 {
6936 FiniteElement* f_el_pt = dynamic_cast<FiniteElement*>(element_pt(0));
6937 // If it's a generalised element mesh dummy output
6938 if (f_el_pt == 0)
6939 {
6940 shared_file << "0.0" << std::endl;
6941 }
6942 else
6943 {
6944 unsigned ndim = f_el_pt->node_pt(0)->ndim();
6945 if (ndim == 2)
6946 {
6947 shared_file << " 1.0 1.1 " << std::endl;
6948 }
6949 else
6950 {
6951 shared_file << " 1.0 1.1 1.1" << std::endl;
6952 }
6953 }
6954 }
6955 shared_file.close();
6956 }
6957 }
6958
6959
6960 // Check for duplicates in shared node scheme
6961 for (int d = 0; d < n_proc; d++)
6962 {
6963 unsigned n_vector = Shared_node_pt[d].size();
6964 std::set<Node*> shared_node_set;
6965 for (unsigned i = 0; i < n_vector; i++)
6966 {
6967 unsigned old_size = shared_node_set.size();
6968 shared_node_set.insert(Shared_node_pt[d][i]);
6969 unsigned new_size = shared_node_set.size();
6970 if (old_size == new_size)
6971 {
6972 Node* nod_pt = Shared_node_pt[d][i];
6973 oomph_info << "Repeated node in shared node lookup scheme: " << i
6974 << "-th node with proc " << d << " : " << nod_pt
6975 << " at: ";
6976 unsigned n = nod_pt->ndim();
6977 for (unsigned ii = 0; ii < n; ii++)
6978 {
6979 oomph_info << nod_pt->x(ii) << " ";
6980 }
6981 oomph_info << std::endl;
6982 }
6983 }
6984 unsigned n_set = shared_node_set.size();
6985 if (n_vector != n_set)
6986 {
6987 std::ostringstream warning_stream;
6988 warning_stream
6989 << "WARNING: " << std::endl
6990 << "There seem to be repeated entries in shared node scheme "
6991 << "with proc " << d << ".\n"
6992 << "n_vector=" << n_vector << "; n_set=" << n_set << std::endl;
6993 if (doc_info.is_doc_enabled())
6994 {
6995 warning_stream << "Shared node scheme has been output in files like\n"
6996 << filename.str() << std::endl;
6997 }
6998 else
6999 {
7000 warning_stream << "Re-run with doc_info enabled to see where the "
7001 "shared nodes are.\n";
7002 }
7003 OomphLibWarning(warning_stream.str(),
7004 "Mesh::check_halo_schemes",
7005 OOMPH_EXCEPTION_LOCATION);
7006 }
7007 }
7008
7009
7010 // Check shared nodes lookup schemes
7011 //----------------------------------
7012 double max_error = 0.0;
7013
7014 // Loop over domains for shared nodes
7015 for (int d = 0; d < n_proc; d++)
7016 {
7017 // Are my shared nodes being checked?
7018 if (d == my_rank)
7019 {
7020 // Loop over domains for shared nodes
7021 for (int dd = 0; dd < n_proc; dd++)
7022 {
7023 // Don't talk to yourself
7024 if (dd != d)
7025 {
7026 // How many of my nodes are shared nodes with processor dd?
7027 int nnod_shared = nshared_node(dd);
7028
7029 if (nnod_shared != 0)
7030 {
7031 // Receive from processor dd how many of his nodes are shared
7032 // with this processor
7033 int nnod_share = 0;
7034 MPI_Recv(
7035 &nnod_share, 1, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
7036
7037 if (nnod_shared != nnod_share)
7038 {
7039 std::ostringstream error_message;
7040
7041 error_message << "Clash in numbers of shared nodes! "
7042 << std::endl;
7043 error_message << "# of shared nodes on proc " << dd << ": "
7044 << nnod_shared << std::endl;
7045 error_message << "# of shared nodes on proc " << d << ": "
7046 << nnod_share << std::endl;
7047 error_message << "(Re-)run Problem::check_halo_schemes() with "
7048 "DocInfo object"
7049 << std::endl;
7050 error_message << "to identify the problem" << std::endl;
7051 throw OomphLibError(error_message.str(),
7052 OOMPH_CURRENT_FUNCTION,
7053 OOMPH_EXCEPTION_LOCATION);
7054 }
7055
7056
7057 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7058
7059 // Get strung-together nodal positions from other processor
7063 MPI_DOUBLE,
7064 dd,
7065 0,
7066 Comm_pt->mpi_comm(),
7067 &status);
7068
7069 // Check
7070 unsigned count = 0;
7071 for (int j = 0; j < nnod_share; j++)
7072 {
7074 for (unsigned i = 0; i < nod_dim; i++)
7075 {
7077 }
7079 for (unsigned i = 0; i < nod_dim; i++)
7080 {
7082 count++;
7083 }
7084 double error = 0.0;
7085 for (unsigned i = 0; i < nod_dim; i++)
7086 {
7087 error +=
7088 (x_shared[i] - x_share[i]) * (x_shared[i] - x_share[i]);
7089 }
7090 error = sqrt(error);
7091
7092 // Doc if relevant
7094 {
7095 oomph_info << "Error in shared nodes: ";
7096 for (unsigned i = 0; i < nod_dim; i++)
7097 {
7098 oomph_info << x_shared[i] << " ";
7099 }
7100 for (unsigned i = 0; i < nod_dim; i++)
7101 {
7102 oomph_info << x_share[i] << " ";
7103 }
7104 oomph_info << error << std::endl;
7105 oomph_info << "shared node: " << shared_node_pt(dd, j)
7106 << std::endl;
7107 if (shared_node_pt(dd, j)->is_hanging())
7108 {
7109 oomph_info << "shared node: " << shared_node_pt(dd, j)
7110 << " is hanging with masters" << std::endl;
7111 for (unsigned m = 0;
7113 m++)
7114 {
7116 << "master: "
7118 m)
7119 << std::endl;
7120 }
7121 }
7122 }
7123
7124 // Keep tracking max
7125 if (error > max_error)
7126 {
7127 max_error = error;
7128 }
7129 }
7130 }
7131 }
7132 }
7133 }
7134 // My shared nodes are not being checked: Send my shared nodes
7135 // to the other processor
7136 else
7137 {
7138 int nnod_share = nshared_node(d);
7139
7140 if (nnod_share != 0)
7141 {
7142 // Send it across to the processor whose shared nodes are being
7143 // checked
7144 MPI_Send(&nnod_share, 1, MPI_INT, d, 0, Comm_pt->mpi_comm());
7145
7146 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7147
7148 // Now string together the nodal positions of all shared nodes
7150 unsigned count = 0;
7151 for (int j = 0; j < nnod_share; j++)
7152 {
7153 for (unsigned i = 0; i < nod_dim; i++)
7154 {
7156 count++;
7157 }
7158 }
7159 // Send it across to the processor whose shared nodes are being
7160 // checked
7163 MPI_DOUBLE,
7164 d,
7165 0,
7166 Comm_pt->mpi_comm());
7167 }
7168 }
7169 }
7170
7171 oomph_info << "Max. error for shared nodes " << max_error << std::endl;
7172 if (max_error > max_permitted_error_for_halo_check)
7173 {
7174 std::ostringstream error_message;
7175 error_message << "This is bigger than the permitted threshold "
7176 << max_permitted_error_for_halo_check << std::endl;
7177 error_message
7178 << "If you believe this to be acceptable for your problem\n"
7179 << "increase Problem::Max_permitted_error_for_halo_check and re-run \n";
7180 throw OomphLibError(
7181 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
7182 }
7183
7184 // Now check the halo/haloed element lookup scheme
7185
7186 // Doc halo/haloed element lookup schemes
7187 //-----------------------------------------
7188 if (doc_info.is_doc_enabled())
7189 {
7190 // Loop over domains for halo elements
7191 for (int dd = 0; dd < n_proc; dd++)
7192 {
7193 filename.str("");
7194 filename << doc_info.directory() << "/halo_element_check"
7195 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
7196 << dd << "_" << doc_info.number() << ".dat";
7197 halo_file.open(filename.str().c_str());
7198
7199 // Get vectors of halo/haloed elements by copy operation
7200 Vector<GeneralisedElement*> halo_elem_pt(halo_element_pt(dd));
7201
7202 unsigned nelem = halo_elem_pt.size();
7203
7204 for (unsigned e = 0; e < nelem; e++)
7205 {
7206 halo_file << "ZONE " << std::endl;
7207 // Can I cast to a finite element
7208 FiniteElement* finite_el_pt =
7209 dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
7210 if (finite_el_pt != 0)
7211 {
7212 unsigned nnod = finite_el_pt->nnode();
7213 for (unsigned j = 0; j < nnod; j++)
7214 {
7215 Node* nod_pt = finite_el_pt->node_pt(j);
7216 unsigned ndim = nod_pt->ndim();
7217 for (unsigned i = 0; i < ndim; i++)
7218 {
7219 halo_file << nod_pt->position(i) << " ";
7220 }
7221 halo_file << std::endl;
7222 }
7223 }
7224 }
7225 halo_file.close();
7226 }
7227
7228 // Loop over domains for halo elements
7229 for (int d = 0; d < n_proc; d++)
7230 {
7231 filename.str("");
7232 filename << doc_info.directory() << "/haloed_element_check"
7233 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
7234 << d << "_" << doc_info.number() << ".dat";
7235 haloed_file.open(filename.str().c_str());
7236
7237 // Get vectors of halo/haloed elements by copy operation
7238 Vector<GeneralisedElement*> haloed_elem_pt(haloed_element_pt(d));
7239
7240 unsigned nelem2 = haloed_elem_pt.size();
7241 for (unsigned e = 0; e < nelem2; e++)
7242 {
7243 haloed_file << "ZONE " << std::endl;
7244 // Is it a finite element
7245 FiniteElement* finite_el_pt =
7246 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
7247 if (finite_el_pt != 0)
7248 {
7249 // Check if it's refineable and if so if it's a leaf.
7250 // If not something must have gone wrong.
7251 RefineableElement* ref_el_pt =
7252 dynamic_cast<RefineableElement*>(finite_el_pt);
7253 if (ref_el_pt != 0)
7254 {
7255 if (!(ref_el_pt->tree_pt()->is_leaf()))
7256 {
7257 std::ostringstream error_message;
7258 error_message << "Haloed element is not a leaf element. This "
7259 "shouldn't happen"
7260 << std::endl;
7261 error_message << "Here are the nodal positions: " << std::endl;
7262 unsigned nnod = ref_el_pt->nnode();
7263 for (unsigned j = 0; j < nnod; j++)
7264 {
7265 Node* nod_pt = ref_el_pt->node_pt(j);
7266 unsigned n_dim = nod_pt->ndim();
7267 for (unsigned i = 0; i < n_dim; i++)
7268 {
7269 error_message << nod_pt->x(i) << " ";
7270 }
7271 error_message << "\n";
7272 throw OomphLibError(error_message.str(),
7273 OOMPH_CURRENT_FUNCTION,
7274 OOMPH_EXCEPTION_LOCATION);
7275 }
7276 }
7277 }
7278 unsigned nnod2 = finite_el_pt->nnode();
7279 for (unsigned j = 0; j < nnod2; j++)
7280 {
7281 Node* nod_pt = finite_el_pt->node_pt(j);
7282 unsigned ndim = nod_pt->ndim();
7283 for (unsigned i = 0; i < ndim; i++)
7284 {
7285 haloed_file << nod_pt->position(i) << " ";
7286 }
7287 haloed_file << std::endl;
7288 }
7289 }
7290 }
7291 haloed_file.close();
7292 }
7293 } // end of if doc flag
7294
7295 // Check halo/haloed element lookup schemes
7296 //-----------------------------------------
7297 max_error = 0.0;
7298 bool shout = false;
7299 bool shout_and_terminate = false;
7300
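// For each processor pair (d owns the haloed elements, dd owns the
// halo copies) the check below exchanges, in this order:
//   1. the number of halo/haloed elements (tag 0),
//   2. the number of strung-together nodal positions, followed by the
//      positions themselves (tag 0),
//   3. the number of hanging-info entries, followed by the entries
//      themselves (tag 1).
// The haloed side then compares nodal positions, numbers of values and
// hanging/master-node data against its own elements.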
7301 // Loop over domains for haloed elements
7302 for (int d = 0; d < n_proc; d++)
7303 {
7304 // Are my haloed elements being checked?
7305 if (d == my_rank)
7306 {
7307 // Loop over domains for halo elements
7308 for (int dd = 0; dd < n_proc; dd++)
7309 {
7310 // Don't talk to yourself
7311 if (dd != d)
7312 {
7313 // Get vectors of haloed elements by copy operation
7314 Vector<GeneralisedElement*> haloed_elem_pt(haloed_element_pt(dd));
7315
7316 // How many of my elements are haloed elements whose halo
7317 // counterpart is located on processor dd?
7318 int nelem_haloed = haloed_elem_pt.size();
7319
7320 if (nelem_haloed != 0)
7321 {
7322 // Receive from processor dd how many of his elements are halo
7323 // nodes whose non-halo counterparts are located here
7324 int nelem_halo = 0;
7325 MPI_Recv(
7326 &nelem_halo, 1, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
7327 if (nelem_halo != nelem_haloed)
7328 {
7329 std::ostringstream error_message;
7330 error_message
7331 << "Clash in numbers of halo and haloed elements! "
7332 << std::endl;
7333 error_message << "# of haloed elements whose halo counterpart "
7334 "lives on proc "
7335 << dd << ": " << nelem_haloed << std::endl;
7336 error_message << "# of halo elements whose non-halo "
7337 "counterpart lives on proc "
7338 << d << ": " << nelem_halo << std::endl;
7339 error_message << "(Re-)run Problem::check_halo_schemes() with "
7340 "DocInfo object"
7341 << std::endl;
7342 error_message << "to identify the problem" << std::endl;
7343 throw OomphLibError(error_message.str(),
7344 OOMPH_CURRENT_FUNCTION,
7345 OOMPH_EXCEPTION_LOCATION);
7346 }
7347
7348
7349 // We can only check nodal stuff for meshes of finite elements
7350 if (dynamic_cast<FiniteElement*>(this->element_pt(0)))
7351 {
7352 // Get strung-together elemental nodal positions
7353 // from other processor
7354 // unsigned nnod_per_el=finite_element_pt(0)->nnode();
7355 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7356 // Vector<double> other_nodal_positions
7357 // (nod_dim*nnod_per_el*nelem_halo);
7358 // MPI_Recv(&other_nodal_positions[0],nod_dim*nnod_per_el*nelem_halo,
7359 // MPI_DOUBLE,dd,0,comm_pt->mpi_comm(),&status);
7360 unsigned n_nodal_positions = 0;
7362 1,
7364 dd,
7365 0,
7366 Comm_pt->mpi_comm(),
7367 &status);
7369 if (n_nodal_positions > 0)
7370 {
7373 MPI_DOUBLE,
7374 dd,
7375 0,
7376 Comm_pt->mpi_comm(),
7377 &status);
7378 }
7379
7380
7381 // Receive hanging info to be checked
7383 unsigned n_other_nodal_hangings = 0;
7385 1,
7387 dd,
7388 1,
7389 Comm_pt->mpi_comm(),
7390 &status);
7391 if (n_other_nodal_hangings > 0)
7392 {
7396 MPI_INT,
7397 dd,
7398 1,
7399 Comm_pt->mpi_comm(),
7400 &status);
7401 }
7402
7403 // If documenting, open the output files
7404 if (doc_info.is_doc_enabled())
7405 {
7406 filename.str("");
7407 filename << doc_info.directory() << "/error_haloed_check"
7408 << doc_info.label() << "_on_proc" << my_rank
7409 << "_with_proc" << dd << "_" << doc_info.number()
7410 << ".dat";
7411 haloed_file.open(filename.str().c_str());
7412 filename.str("");
7413 filename << doc_info.directory() << "/error_halo_check"
7414 << doc_info.label() << "_on_proc" << my_rank
7415 << "_with_proc" << dd << "_" << doc_info.number()
7416 << ".dat";
7417 halo_file.open(filename.str().c_str());
7418 }
7419
7420 unsigned count = 0;
7421 unsigned count_hanging = 0;
7422 for (int e = 0; e < nelem_haloed; e++)
7423 {
7424 FiniteElement* finite_el_pt =
7425 dynamic_cast<FiniteElement*>(haloed_elem_pt[e]);
7426
7427 if (finite_el_pt != 0)
7428 {
7429 unsigned nnod_this_el = finite_el_pt->nnode();
7430 for (unsigned j = 0; j < nnod_this_el; j++)
7431 {
7432 Node* nod_pt = finite_el_pt->node_pt(j);
7433 // unsigned nod_dim = nod_pt->ndim();
7434
7435 // Testing POSITIONS, not x location
7436 // (cf hanging nodes, nodes.h)
7438 for (unsigned i = 0; i < nod_dim; i++)
7439 {
7440 x_haloed[i] = nod_pt->position(i);
7441 }
7442
7444 for (unsigned i = 0; i < nod_dim; i++)
7445 {
7447 ++count;
7448 }
7449
7450 double error = 0.0;
7451 shout = false;
7452 for (unsigned i = 0; i < nod_dim; i++)
7453 {
7454 error +=
7455 (x_haloed[i] - x_halo[i]) * (x_haloed[i] - x_halo[i]);
7456 }
7457 error = sqrt(error);
7458
7459 if (error > max_error)
7460 {
7461 max_error = error;
7462 }
7463 double tol = 1.0e-12;
7464 if (error > tol)
7465 {
7467 << "Discrepancy between nodal coordinates of halo(ed)"
7468 << "element larger than tolerance (" << tol
7469 << ")\n Error: " << error << "\n";
7470 shout = true;
7471 }
7472
7473 unsigned nval = nod_pt->nvalue();
7475 count_hanging++;
7476 if (int(nval) != nval_other)
7477 {
7479 << "Number of values of node, " << nval
7480 << ", does not match number of values on other proc, "
7481 << nval_other << std::endl;
7482 shout = true;
7483 shout_and_terminate = true;
7484 }
7485
7486 // Is other node geometrically hanging?
7487 int other_geom_hanging = 0;
7488
7489 // Check hangingness/number of master nodes
7490 for (int i = -1; i < int(nval); i++)
7491 {
7493 count_hanging++;
7494
7495 // Record geom hang status of other node
7496 if (i == -1) other_geom_hanging = nmaster_other;
7497
7498 // Value is hanging on local proc: Does it have the same
7499 // number of masters as its counterpart on other proc?
7500 if (nod_pt->is_hanging(i))
7501 {
7502 unsigned nmaster = nod_pt->hanging_pt(i)->nmaster();
7503 if (int(nmaster) != nmaster_other)
7504 {
7506 << "Number of master nodes for hanging value "
7507 << i << " of node, " << nmaster
7508 << ", does not match number of master "
7509 << "nodes on other proc, " << nmaster_other
7510 << std::endl;
7511 shout = true;
7512 shout_and_terminate = true;
7513 }
7514 }
7515 // Value is not hanging on local proc: It had better
7516 // not have any masters (i.e. be hanging) on the other
7517 // proc
7518 else
7519 {
7520 if (nmaster_other != 0)
7521 {
7523 << "Value " << i
7524 << " of node is not hanging whereas "
7525 << " node on other proc has " << nmaster_other
7526 << " masters and therefore is hanging. \n";
7527 shout = true;
7528 shout_and_terminate = true;
7529 }
7530 }
7531 }
7532
7533 if (shout)
7534 {
7535 // Report error. NOTE: ERROR IS THROWN BELOW ONCE
7536 // ALL THIS HAS BEEN PROCESSED.
7537
7539 << "Error(s) displayed above are for "
7540 << "domain with non-halo (i.e. haloed) elem: " << dd
7541 << "\n";
7543 << "Domain with halo elem: " << d
7544 << "\n";
7545 switch (nod_dim)
7546 {
7547 case 1:
7549 << "Current processor is " << my_rank << "\n"
7550 << "Nodal positions: " << x_halo[0] << "\n"
7551 << "and haloed: " << x_haloed[0]
7552 << "\n"
7553 //<< "Node pointer: " << finite_el_pt->node_pt(j)
7554 << "\n";
7555 break;
7556 case 2:
7558 << "Current processor is " << my_rank << "\n"
7559 << "Nodal positions: " << x_halo[0] << " "
7560 << x_halo[1] << "\n"
7561 << "and haloed: " << x_haloed[0] << " "
7562 << x_haloed[1]
7563 << std::endl
7564 //<< "Node pointer: " << finite_el_pt->node_pt(j)
7565 << "\n";
7566 break;
7567 case 3:
7569 << "Current processor is " << my_rank << "\n"
7570 << "Nodal positions: " << x_halo[0] << " "
7571 << x_halo[1] << " " << x_halo[2] << "\n"
7572 << "and haloed: " << x_haloed[0] << " "
7573 << x_haloed[1] << " " << x_haloed[2]
7574 << std::endl
7575 //<< "Node pointer: " << finite_el_pt->node_pt(j)
7576 << "\n";
7577 break;
7578 default:
7579 throw OomphLibError(
7580 "Nodal dimension not equal to 1, 2 or 3\n",
7583 }
7584
7585
7586 // If documenting, write to output files
7587 if (doc_info.is_doc_enabled())
7588 {
7589 for (unsigned i = 0; i < nod_dim; i++)
7590 {
7591 haloed_file << x_haloed[i] << " ";
7592 halo_file << x_halo[i] << " ";
7593 }
7594 haloed_file << error << " " << my_rank << " " << dd
7595 << " "
7597 << std::endl;
7598 halo_file << error << " " << my_rank << " " << dd
7599 << " " << other_geom_hanging << std::endl;
7600 }
7601 }
7602 } // j<nnod_per_el
7603 }
7604 } // e<nelem_haloed
7605
7606 // If documenting, close output files
7607 if (doc_info.is_doc_enabled())
7608 {
7609 haloed_file.close();
7610 halo_file.close();
7611 }
7612 }
7613 }
7614 }
7615 }
7616 }
7617 // My haloed elements are not being checked: Send my halo elements
7618 // whose non-halo counterparts are located on processor d
7619 else
7620 {
7621 // Get vectors of halo elements by copy operation
7622 Vector<GeneralisedElement*> halo_elem_pt(halo_element_pt(d));
7623
7624 // How many of my elements are halo elements whose non-halo
7625 // counterpart is located on processor d?
7626 unsigned nelem_halo = halo_elem_pt.size();
7627
7628 if (nelem_halo != 0)
7629 {
7630 // Send it across to the processor whose haloed nodes are being
7631 // checked
7632 MPI_Send(&nelem_halo, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
7633
7634 // Only bother if the mesh consists of finite elements
7635 if (dynamic_cast<FiniteElement*>(this->element_pt(0)))
7636 {
7637 // Now string together the nodal positions of all halo nodes
7638 // Use this only to work out roughly how much space to
7639 // reserve for the vector. Then we can push data cheaply
7640 // while not assuming all elements have the same number
7641 // of nodes.
7642 unsigned nnod_first_el = finite_element_pt(0)->nnode();
7643 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7646
7647 // Storage for hang information
7649
7650 // unsigned count=0;
7651 for (unsigned e = 0; e < nelem_halo; e++)
7652 {
7653 FiniteElement* finite_el_pt =
7654 dynamic_cast<FiniteElement*>(halo_elem_pt[e]);
7655 if (finite_el_pt != 0)
7656 {
7657 unsigned nnod_this_el = finite_el_pt->nnode();
7658 for (unsigned j = 0; j < nnod_this_el; j++)
7659 {
7660 Node* nod_pt = finite_el_pt->node_pt(j);
7661 // unsigned nod_dim = nod_pt->ndim();
7662
7663 // Throw error if node doesn't exist
7664 if (nod_pt == 0)
7665 {
7666 // Print our nodes in element
7667 oomph_info << "element: " << finite_el_pt << std::endl;
7668 for (unsigned i = 0; i < finite_el_pt->nnode(); i++)
7669 {
7670 oomph_info << finite_el_pt->node_pt(i) << std::endl;
7671 }
7672
7673 // Throw error
7674 throw OomphLibError("Node doesn't exist!",
7677 }
7678
7679 // Testing POSITIONS, not x location (cf hanging nodes,
7680 // nodes.h)
7681 for (unsigned i = 0; i < nod_dim; i++)
7682 {
7683 // nodal_positions[count]=nod_pt->position(i);
7684 // count++;
7685 nodal_positions.push_back(nod_pt->position(i));
7686 }
7687
7688 unsigned nval = nod_pt->nvalue();
7689 nodal_hangings.push_back(nval);
7690 for (int i = -1; i < int(nval); i++)
7691 {
7692 if (nod_pt->is_hanging(i))
7693 {
7694 unsigned nmaster = nod_pt->hanging_pt(i)->nmaster();
7695 nodal_hangings.push_back(nmaster);
7696 }
7697 else
7698 {
7699 nodal_hangings.push_back(0);
7700 }
7701 }
7702 }
7703 }
7704 }
7705
7706 // Total number of nodal positions to be checked
7708
7709 // Total number of nodal hang information to be checked
7711
7712 // Send it across to the processor whose haloed elements are being
7713 // checked
7714 // MPI_Send(&nodal_positions[0],nod_dim*nnod_per_el*nelem_halo,
7715 // MPI_DOUBLE,d,0,comm_pt->mpi_comm());
7716 MPI_Send(
7717 &n_nodal_positions, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
7718 if (n_nodal_positions > 0)
7719 {
7722 MPI_DOUBLE,
7723 d,
7724 0,
7725 Comm_pt->mpi_comm());
7726 }
7727 MPI_Send(
7728 &n_nodal_hangings, 1, MPI_UNSIGNED, d, 1, Comm_pt->mpi_comm());
7729 if (n_nodal_hangings > 0)
7730 {
7733 MPI_INT,
7734 d,
7735 1,
7736 Comm_pt->mpi_comm());
7737 }
7738 }
7739 }
7740 }
7741 }
7742
7743 oomph_info << "Max. error for halo/haloed elements " << max_error
7744 << std::endl;
7745 if (max_error > max_permitted_error_for_halo_check)
7746 {
7747 shout_and_terminate = true;
7748 oomph_info << "This is bigger than the permitted threshold "
7749 << max_permitted_error_for_halo_check << std::endl;
7750 oomph_info
7751 << "If you believe this to be acceptable for your problem\n"
7752 << "increase Problem::Max_permitted_error_for_halo_check and re-run \n";
7753 }
7754
7755 if (shout_and_terminate)
7756 {
7757 throw OomphLibError("Error in halo checking",
7758 OOMPH_CURRENT_FUNCTION,
7759 OOMPH_EXCEPTION_LOCATION);
7760 }
7761
7762 // Now check the halo/haloed nodes lookup schemes
7763
7764 // Doc halo/haloed nodes lookup schemes
7765 //-------------------------------------
7766 if (doc_info.is_doc_enabled())
7767 {
7768 // Loop over domains for halo nodes
7769 for (int dd = 0; dd < n_proc; dd++)
7770 {
7771 filename.str("");
7772 filename << doc_info.directory() << "/halo_node_check"
7773 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
7774 << dd << "_" << doc_info.number() << ".dat";
7775 halo_file.open(filename.str().c_str());
7776 halo_file << "ZONE " << std::endl;
7777
7778 unsigned nnod = nhalo_node(dd);
7779 for (unsigned j = 0; j < nnod; j++)
7780 {
7781 Node* nod_pt = halo_node_pt(dd, j);
7782 unsigned ndim = nod_pt->ndim();
7783 for (unsigned i = 0; i < ndim; i++)
7784 {
7785 halo_file << nod_pt->position(i) << " ";
7786 }
7787 halo_file << nod_pt->is_hanging() << std::endl;
7788 }
7789 // Dummy output for processor that doesn't share halo nodes
7790 // (needed for tecplot)
7791 // (This will only work if there are elements on this processor...)
7792 if ((nnod == 0) && (nelement() != 0))
7793 {
7794 FiniteElement* f_el_pt = dynamic_cast<FiniteElement*>(element_pt(0));
7795 // If it's a generalised element mesh dummy output
7796 if (f_el_pt == 0)
7797 {
7798 halo_file << "0.0" << std::endl;
7799 }
7800 else
7801 {
7802 unsigned ndim = f_el_pt->node_pt(0)->ndim();
7803 if (ndim == 2)
7804 {
7805 halo_file << " 1.0 1.1 " << std::endl;
7806 }
7807 else
7808 {
7809 halo_file << " 1.0 1.1 1.1" << std::endl;
7810 }
7811 }
7812 }
7813 halo_file.close();
7814 }
7815
7816
7817 // Loop over domains for haloed nodes
7818 for (int d = 0; d < n_proc; d++)
7819 {
7820 filename.str("");
7821 filename << doc_info.directory() << "/haloed_node_check"
7822 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
7823 << d << "_" << doc_info.number() << ".dat";
7824 haloed_file.open(filename.str().c_str());
7825 haloed_file << "ZONE " << std::endl;
7826
7827 unsigned nnod = nhaloed_node(d);
7828 for (unsigned j = 0; j < nnod; j++)
7829 {
7830 Node* nod_pt = haloed_node_pt(d, j);
7831 unsigned ndim = nod_pt->ndim();
7832 for (unsigned i = 0; i < ndim; i++)
7833 {
7834 haloed_file << nod_pt->position(i) << " ";
7835 }
7836 haloed_file << nod_pt->is_hanging() << std::endl;
7837 }
7838 // Dummy output for processor that doesn't share halo nodes
7839 // (needed for tecplot)
7840 if ((nnod == 0) && (nelement() != 0))
7841 {
7842 FiniteElement* f_el_pt = dynamic_cast<FiniteElement*>(element_pt(0));
7843 // If it's a generalised element mesh dummy output
7844 if (f_el_pt == 0)
7845 {
7846 haloed_file << "0.0" << std::endl;
7847 }
7848 else
7849 {
7850 unsigned ndim = f_el_pt->node_pt(0)->ndim();
7851 if (ndim == 2)
7852 {
7853 haloed_file << " 1.0 1.1 " << std::endl;
7854 }
7855 else
7856 {
7857 haloed_file << " 1.0 1.1 1.1" << std::endl;
7858 }
7859 }
7860 }
7861 haloed_file.close();
7862 }
7863 }
7864
7865 // Check halo/haloed nodes lookup schemes
7866 //---------------------------------------
7867 max_error = 0.0;
7868
7869 // Loop over domains for haloed nodes
7870 for (int d = 0; d < n_proc; d++)
7871 {
7872 // Are my haloed nodes being checked?
7873 if (d == my_rank)
7874 {
7875 // Loop over domains for halo nodes
7876 for (int dd = 0; dd < n_proc; dd++)
7877 {
7878 // Don't talk to yourself
7879 if (dd != d)
7880 {
7881 // How many of my nodes are haloed nodes whose halo
7882 // counterpart is located on processor dd?
7883 int nnod_haloed = nhaloed_node(dd);
7884
7885 if (nnod_haloed != 0)
7886 {
7887 // Receive from processor dd how many of his nodes are halo
7888 // nodes whose non-halo counterparts are located here
7889 int nnod_halo = 0;
7890 MPI_Recv(
7891 &nnod_halo, 1, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
7892
7893 if (nnod_haloed != nnod_halo)
7894 {
7895 std::ostringstream error_message;
7896
7897 error_message << "Clash in numbers of halo and haloed nodes! "
7898 << std::endl;
7899 error_message
7900 << "# of haloed nodes whose halo counterpart lives on proc "
7901 << dd << ": " << nnod_haloed << std::endl;
7902 error_message
7903 << "# of halo nodes whose non-halo counterpart lives on proc "
7904 << d << ": " << nnod_halo << std::endl;
7905 error_message
7906 << "(Re-)run Mesh::check_halo_schemes() with DocInfo object"
7907 << std::endl;
7908 error_message << "to identify the problem" << std::endl;
7909 throw OomphLibError(error_message.str(),
7910 OOMPH_CURRENT_FUNCTION,
7911 OOMPH_EXCEPTION_LOCATION);
7912 }
7913
7914
7915 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7916
7917 // Get strung-together nodal positions from other processor
7921 MPI_DOUBLE,
7922 dd,
7923 0,
7924 Comm_pt->mpi_comm(),
7925 &status);
7926
7927 // Check
7928 unsigned count = 0;
7929 for (int j = 0; j < nnod_halo; j++)
7930 {
7932 for (unsigned i = 0; i < nod_dim; i++)
7933 {
7935 }
7937 for (unsigned i = 0; i < nod_dim; i++)
7938 {
7940 count++;
7941 }
7942 double error = 0.0;
7943 for (unsigned i = 0; i < nod_dim; i++)
7944 {
7945 error +=
7946 (x_haloed[i] - x_halo[i]) * (x_haloed[i] - x_halo[i]);
7947 }
7948 error = sqrt(error);
7949 if (error > max_error)
7950 {
7951 max_error = error;
7952 }
7953 }
7954 }
7955 }
7956 }
7957 }
7958 // My haloed nodes are not being checked: Send my halo nodes
7959 // whose non-halo counterparts are located on processor d
7960 else
7961 {
7962 int nnod_halo = nhalo_node(d);
7963
7964 if (nnod_halo != 0)
7965 {
7966 // Send it across to the processor whose haloed nodes are being
7967 // checked
7968 MPI_Send(&nnod_halo, 1, MPI_INT, d, 0, Comm_pt->mpi_comm());
7969
7970 unsigned nod_dim = finite_element_pt(0)->node_pt(0)->ndim();
7971
7972 // Now string together the nodal positions of all halo nodes
7974 unsigned count = 0;
7975 for (int j = 0; j < nnod_halo; j++)
7976 {
7977 for (unsigned i = 0; i < nod_dim; i++)
7978 {
7980 count++;
7981 }
7982 }
7983 // Send it across to the processor whose haloed nodes are being
7984 // checked
7987 MPI_DOUBLE,
7988 d,
7989 0,
7990 Comm_pt->mpi_comm());
7991 }
7992 }
7993 }
7994
7995 oomph_info << "Max. error for halo/haloed nodes " << max_error << std::endl;
7996
7997 if (max_error > max_permitted_error_for_halo_check)
7998 {
7999 std::ostringstream error_message;
8000 error_message << "This is bigger than the permitted threshold "
8001 << max_permitted_error_for_halo_check << std::endl;
8002 error_message
8003 << "If you believe this to be acceptable for your problem\n"
8004 << "increase Problem::Max_permitted_error_for_halo_check and re-run \n";
8005 throw OomphLibError(
8006 error_message.str(), OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION);
8007 }
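// As the error messages above indicate, the tolerance used by these
// checks is the max_permitted_error_for_halo_check argument; at the
// Problem level it corresponds to
// Problem::Max_permitted_error_for_halo_check, which can be increased
// if a larger geometric discrepancy is genuinely acceptable.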
8008
8009
8010 // Now check the external halo/haloed element lookup scheme
8011
8012 // Doc external halo/haloed element lookup schemes
8013 //--------------------------------------------------
8014 if (doc_info.is_doc_enabled())
8015 {
8016 // Loop over domains for external halo elements
8017 for (int dd = 0; dd < n_proc; dd++)
8018 {
8019 filename.str("");
8020 filename << doc_info.directory() << "/ext_halo_element_check"
8021 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
8022 << dd << "_" << doc_info.number() << ".dat";
8023 ext_halo_file.open(filename.str().c_str());
8025 ext_halo_file.close();
8026
8027
8028 filename.str("");
8029 filename << doc_info.directory() << "/ext_halo_node_check"
8030 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
8031 << dd << "_" << doc_info.number() << ".dat";
8032 ext_halo_file.open(filename.str().c_str());
8033
8034 // Get vectors of external halo/haloed elements by copy operation
8037
8038 unsigned nelem = ext_halo_elem_pt.size();
8039
8040 for (unsigned e = 0; e < nelem; e++)
8041 {
8042 ext_halo_file << "ZONE " << std::endl;
8043 // Can I cast to a finite element
8044 FiniteElement* finite_el_pt =
8045 dynamic_cast<FiniteElement*>(ext_halo_elem_pt[e]);
8046 if (finite_el_pt != 0)
8047 {
8048 unsigned nnod = finite_el_pt->nnode();
8049 for (unsigned j = 0; j < nnod; j++)
8050 {
8051 Node* nod_pt = finite_el_pt->node_pt(j);
8052 unsigned ndim = nod_pt->ndim();
8053 for (unsigned i = 0; i < ndim; i++)
8054 {
8055 ext_halo_file << nod_pt->position(i) << " ";
8056 }
8057 ext_halo_file << std::endl;
8058 }
8059 }
8060 }
8061 ext_halo_file.close();
8062 }
8063
8064 // Loop over domains for external halo elements
8065 for (int d = 0; d < n_proc; d++)
8066 {
8067 filename.str("");
8068 filename << doc_info.directory() << "/ext_haloed_element_check"
8069 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
8070 << d << "_" << doc_info.number() << ".dat";
8071 ext_haloed_file.open(filename.str().c_str());
8073 ext_haloed_file.close();
8074
8075
8076 filename.str("");
8077 filename << doc_info.directory() << "/ext_haloed_node_check"
8078 << doc_info.label() << "_on_proc" << my_rank << "_with_proc"
8079 << d << "_" << doc_info.number() << ".dat";
8080 ext_haloed_file.open(filename.str().c_str());
8081
8082 // Get vectors of external halo/haloed elements by copy operation
8085
8086 unsigned nelem2 = ext_haloed_elem_pt.size();
8087 for (unsigned e = 0; e < nelem2; e++)
8088 {
8089 ext_haloed_file << "ZONE " << std::endl;
8090 // Is it a finite element
8091 FiniteElement* finite_el_pt =
8092 dynamic_cast<FiniteElement*>(ext_haloed_elem_pt[e]);
8093 if (finite_el_pt != 0)
8094 {
8095 unsigned nnod2 = finite_el_pt->nnode();
8096 for (unsigned j = 0; j < nnod2; j++)
8097 {
8098 Node* nod_pt = finite_el_pt->node_pt(j);
8099 unsigned ndim = nod_pt->ndim();
8100 for (unsigned i = 0; i < ndim; i++)
8101 {
8102 ext_haloed_file << nod_pt->position(i) << " ";
8103 }
8104 ext_haloed_file << std::endl;
8105 }
8106 }
8107 }
8108 ext_haloed_file.close();
8109 }
8110 } // end of if doc flag
8111
8112 // Check external halo/haloed element lookup schemes
8113 //--------------------------------------------------
8114 max_error = 0.0;
8115 shout = false;
8116 shout_and_terminate = false;
8117
8118 // Loop over domains for external haloed elements
8119 for (int d = 0; d < n_proc; d++)
8120 {
8121 // Are my external haloed elements being checked?
8122 if (d == my_rank)
8123 {
8124 // Loop over domains for external halo elements
8125 for (int dd = 0; dd < n_proc; dd++)
8126 {
8127 // Don't talk to yourself
8128 if (dd != d)
8129 {
8130 // Get vectors of external haloed elements by copy operation
8133
8134 // How many of my elements are external haloed elements whose halo
8135 // counterpart is located on processor dd?
8137
8138 if (nelem_haloed != 0)
8139 {
8140 // Receive from processor dd how many of his elements are halo
8141 // nodes whose non-halo counterparts are located here
8142 int nelem_halo = 0;
8143 MPI_Recv(
8144 &nelem_halo, 1, MPI_INT, dd, 0, Comm_pt->mpi_comm(), &status);
8145 if (nelem_halo != nelem_haloed)
8146 {
8147 std::ostringstream error_message;
8148 error_message
8149 << "Clash in numbers of external halo and haloed elements! "
8150 << std::endl;
8151 error_message << "# of external haloed elements whose halo "
8152 "counterpart lives on proc "
8153 << dd << ": " << nelem_haloed << std::endl;
8154 error_message << "# of external halo elements whose non-halo "
8155 "counterpart lives on proc "
8156 << d << ": " << nelem_halo << std::endl;
8157 error_message << "(Re-)run Problem::check_halo_schemes() with "
8158 "DocInfo object"
8159 << std::endl;
8160 error_message << "to identify the problem" << std::endl;
8161 throw OomphLibError(error_message.str(),
8164 }
8165
8166
8167 // We can only check nodal stuff for meshes of finite elements
8169 dynamic_cast<FiniteElement*>(ext_haloed_elem_pt[0]);
8170 if (fe_pt != 0)
8171 {
8172 // Get strung-together elemental nodal positions
8173 // from other processor
8174 // unsigned nnod_per_el=fe_pt->nnode();
8175 unsigned nod_dim = fe_pt->node_pt(0)->ndim();
8176 // Vector<double> other_nodal_positions
8177 // (nod_dim*nnod_per_el*nelem_halo);
8178 // MPI_Recv(&other_nodal_positions[0],nod_dim*nnod_per_el*nelem_halo,
8179 // MPI_DOUBLE,dd,0,comm_pt->mpi_comm(),&status);
8180 unsigned n_nodal_positions = 0;
8182 1,
8184 dd,
8185 0,
8186 Comm_pt->mpi_comm(),
8187 &status);
8189 if (n_nodal_positions > 0)
8190 {
8193 MPI_DOUBLE,
8194 dd,
8195 0,
8196 Comm_pt->mpi_comm(),
8197 &status);
8198 }
8199
8200
8201 // Receive hanging info to be checked
8203 unsigned n_other_nodal_hangings = 0;
8205 1,
8207 dd,
8208 1,
8209 Comm_pt->mpi_comm(),
8210 &status);
8211 if (n_other_nodal_hangings > 0)
8212 {
8216 MPI_INT,
8217 dd,
8218 1,
8219 Comm_pt->mpi_comm(),
8220 &status);
8221 }
8222
8223 // If documenting, open the output files
8224 if (doc_info.is_doc_enabled())
8225 {
8226 filename.str("");
8227 filename << doc_info.directory() << "/error_ext_haloed_check"
8228 << doc_info.label() << "_on_proc" << my_rank
8229 << "_with_proc" << dd << "_" << doc_info.number()
8230 << ".dat";
8231 ext_haloed_file.open(filename.str().c_str());
8232 filename.str("");
8233 filename << doc_info.directory() << "/error_ext_halo_check"
8234 << doc_info.label() << "_on_proc" << my_rank
8235 << "_with_proc" << dd << "_" << doc_info.number()
8236 << ".dat";
8237 ext_halo_file.open(filename.str().c_str());
8238 }
8239
8240 unsigned count = 0;
8241 unsigned count_hanging = 0;
8242 for (int e = 0; e < nelem_haloed; e++)
8243 {
8244 FiniteElement* finite_el_pt =
8245 dynamic_cast<FiniteElement*>(ext_haloed_elem_pt[e]);
8246
8247 if (finite_el_pt != 0)
8248 {
8249 unsigned nnod_this_el = finite_el_pt->nnode();
8250 for (unsigned j = 0; j < nnod_this_el; j++)
8251 {
8253 // unsigned nod_dim = mod_pt->ndim();
8254
8255 // Testing POSITIONS, not x location
8256 // (cf hanging nodes, nodes.h)
8258 for (unsigned i = 0; i < nod_dim; i++)
8259 {
8260 x_haloed[i] = nod_pt->position(i);
8261 }
8262
8264 for (unsigned i = 0; i < nod_dim; i++)
8265 {
8267 ++count;
8268 }
8269
8270 double error = 0.0;
8271 shout = false;
8272 for (unsigned i = 0; i < nod_dim; i++)
8273 {
8274 error +=
8275 (x_haloed[i] - x_halo[i]) * (x_haloed[i] - x_halo[i]);
8276 }
8277 error = sqrt(error);
8278
8279 if (error > max_error)
8280 {
8281 max_error = error;
8282 }
8283 double tol = 1.0e-12;
8284 if (error > tol)
8285 {
8286 oomph_info << "Discrepancy between nodal coordinates "
8287 "of external halo(ed)"
8288 << "element larger than tolerance (" << tol
8289 << ")\n Error: " << error << "\n";
8290 shout = true;
8291 }
8292
8293 unsigned nval = nod_pt->nvalue();
8294 int nval_other = other_nodal_hangings[count_hanging];
8295 count_hanging++;
8296 if (int(nval) != nval_other)
8297 {
8298 oomph_info
8299 << "Number of values of node, " << nval
8300 << ", does not match number of values on other proc, "
8301 << nval_other << std::endl;
8302 shout = true;
8303 shout_and_terminate = true;
8304 }
8305
8306 // Is other node geometrically hanging?
8307 int other_geom_hanging = 0;
8308
8309 // Check hangingness/number of master nodes
8310 for (int i = -1; i < int(nval); i++)
8311 {
8312 int nmaster_other = other_nodal_hangings[count_hanging];
8313 count_hanging++;
8314
8315 // Record geom hang status of other node
8316 if (i == -1) other_geom_hanging = nmaster_other;
8317
8318 // Value is hanging on local proc: Does it have the same
8319 // number of masters as its counterpart on other proc?
8320 if (nod_pt->is_hanging(i))
8321 {
8322 unsigned nmaster = nod_pt->hanging_pt(i)->nmaster();
8323 if (int(nmaster) != nmaster_other)
8324 {
8325 oomph_info
8326 << "Number of master nodes for hanging value "
8327 << i << " of node, " << nmaster
8328 << ", does not match number of master "
8329 << "nodes on other proc, " << nmaster_other
8330 << std::endl;
8331 shout = true;
8332 shout_and_terminate = true;
8333 }
8334 }
8335 // Value is not hanging on local proc: It had better
8336 // not have any masters (i.e. be hanging) on the other
8337 // proc
8338 else
8339 {
8340 if (nmaster_other != 0)
8341 {
8342 oomph_info
8343 << "Value " << i
8344 << " of node is not hanging whereas "
8345 << " node on other proc has " << nmaster_other
8346 << " masters and therefore is hanging. \n";
8347 shout = true;
8348 shout_and_terminate = true;
8349 }
8350 }
8351 }
8352
8353 if (shout)
8354 {
8355 // Report error. NOTE: ERROR IS THROWN BELOW ONCE
8356 // ALL THIS HAS BEEN PROCESSED.
8357
8358 oomph_info << "Error(s) displayed above are for "
8359 << "domain with external non-halo (i.e. "
8360 "haloed) elem: "
8361 << dd << "\n";
8362 oomph_info
8363 << "Domain with halo elem: " << d
8364 << "\n";
8365 switch (nod_dim)
8366 {
8367 case 1:
8368 oomph_info
8369 << "Current processor is " << my_rank << "\n"
8370 << "Nodal positions: " << x_halo[0] << "\n"
8371 << "and haloed: " << x_haloed[0]
8372 << "\n"
8373 //<< "Node pointer: " << finite_el_pt->node_pt(j)
8374 << "\n";
8375 break;
8376 case 2:
8377 oomph_info
8378 << "Current processor is " << my_rank << "\n"
8379 << "Nodal positions: " << x_halo[0] << " "
8380 << x_halo[1] << "\n"
8381 << "and haloed: " << x_haloed[0] << " "
8382 << x_haloed[1]
8383 << std::endl
8384 //<< "Node pointer: " << finite_el_pt->node_pt(j)
8385 << "\n";
8386 break;
8387 case 3:
8388 oomph_info
8389 << "Current processor is " << my_rank << "\n"
8390 << "Nodal positions: " << x_halo[0] << " "
8391 << x_halo[1] << " " << x_halo[2] << "\n"
8392 << "and haloed: " << x_haloed[0] << " "
8393 << x_haloed[1] << " " << x_haloed[2]
8394 << std::endl
8395 //<< "Node pointer: " << finite_el_pt->node_pt(j)
8396 << "\n";
8397 break;
8398 default:
8399 throw OomphLibError(
8400 "Nodal dimension not equal to 1, 2 or 3\n",
8401 OOMPH_CURRENT_FUNCTION,
8402 OOMPH_EXCEPTION_LOCATION);
8403 }
8404
8405
8406 // If documenting, write to output files
8407 if (doc_info.is_doc_enabled())
8408 {
8409 for (unsigned i = 0; i < nod_dim; i++)
8410 {
8411 ext_haloed_file << x_haloed[i] << " ";
8412 ext_halo_file << x_halo[i] << " ";
8413 }
8414 ext_haloed_file
8415 << error << " " << my_rank << " " << dd << " "
8416 << nod_pt->is_hanging()
8417 << std::endl;
8418 ext_halo_file << error << " " << my_rank << " " << dd
8419 << " " << other_geom_hanging
8420 << std::endl;
8421 }
8422 }
8423 } // j<nnod_per_el
8424 }
8425 } // e<nelem_haloed
8426
8427 // If documenting, close output files
8428 if (doc_info.is_doc_enabled())
8429 {
8430 ext_haloed_file.close();
8431 ext_halo_file.close();
8432 }
8433 }
8434 }
8435 }
8436 }
8437 }
8438 // My external haloed elements are not being checked: Send my halo
8439 // elements whose non-halo counterparts are located on processor d
8440 else
8441 {
8442 // Get vectors of external halo elements by copy operation
8443 Vector<GeneralisedElement*> ext_halo_elem_pt =
8444 external_halo_element_pt(d);
8445
8446 // How many of my elements are external halo elements whose non-halo
8447 // counterpart is located on processor d?
8448 unsigned nelem_halo = ext_halo_elem_pt.size();
8449
8450 if (nelem_halo != 0)
8451 {
8452 // Send it across to the processor whose external haloed nodes
8453 // are being checked
8454 MPI_Send(&nelem_halo, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
8455
8456 // Only bother if the mesh consists of finite elements
8457 FiniteElement* fe_pt =
8458 dynamic_cast<FiniteElement*>(ext_halo_elem_pt[0]);
8459 if (fe_pt != 0)
8460 {
8461 // Now string together the nodal positions of all halo nodes
8462 unsigned nnod_first_el = fe_pt->nnode();
8463 unsigned nod_dim = fe_pt->node_pt(0)->ndim();
8464 Vector<double> nodal_positions;
8465 nodal_positions.reserve(nod_dim * nnod_first_el * nelem_halo);
8466
8467 // Storage for hang information
8468 Vector<int> nodal_hangings;
8469
8470 unsigned count = 0;
8471 for (unsigned e = 0; e < nelem_halo; e++)
8472 {
8473 FiniteElement* finite_el_pt =
8474 dynamic_cast<FiniteElement*>(ext_halo_elem_pt[e]);
8475 if (finite_el_pt != 0)
8476 {
8477 unsigned nnod_this_el = finite_el_pt->nnode();
8478 for (unsigned j = 0; j < nnod_this_el; j++)
8479 {
8480 Node* nod_pt = finite_el_pt->node_pt(j);
8481
8482 // Testing POSITIONS, not x location (cf hanging nodes,
8483 // nodes.h)
8484 for (unsigned i = 0; i < nod_dim; i++)
8485 {
8486 nodal_positions.push_back(nod_pt->position(i));
8487 count++;
8488 }
8489
8490 unsigned nval = nod_pt->nvalue();
8491 nodal_hangings.push_back(nval);
8492 for (int i = -1; i < int(nval); i++)
8493 {
8494 if (nod_pt->is_hanging(i))
8495 {
8496 unsigned nmaster = nod_pt->hanging_pt(i)->nmaster();
8497 nodal_hangings.push_back(nmaster);
8498 }
8499 else
8500 {
8501 nodal_hangings.push_back(0);
8502 }
8503 }
8504 }
8505 }
8506 }
8507
8508 // Total number of nodal positions to be checked
8509 unsigned n_nodal_positions = nodal_positions.size();
8510
8511 // Total number of nodal hang information to be checked
8512 unsigned n_nodal_hangings = nodal_hangings.size();
8513
8514 // Send it across to the processor whose external haloed
8515 // elements are being checked
8516 // MPI_Send(&nodal_positions[0],nod_dim*nnod_per_el*nelem_halo,
8517 // MPI_DOUBLE,d,0,comm_pt->mpi_comm());
8518 MPI_Send(
8519 &n_nodal_positions, 1, MPI_UNSIGNED, d, 0, Comm_pt->mpi_comm());
8520 if (n_nodal_positions > 0)
8521 {
8522 MPI_Send(&nodal_positions[0],
8523 n_nodal_positions,
8524 MPI_DOUBLE,
8525 d,
8526 0,
8527 Comm_pt->mpi_comm());
8528 }
8529 MPI_Send(
8530 &n_nodal_hangings, 1, MPI_UNSIGNED, d, 1, Comm_pt->mpi_comm());
8531 if (n_nodal_hangings > 0)
8532 {
8533 MPI_Send(&nodal_hangings[0],
8534 n_nodal_hangings,
8535 MPI_INT,
8536 d,
8537 1,
8538 Comm_pt->mpi_comm());
8539 }
8540 }
8541 }
8542 }
8543 }
8544
8545 oomph_info << "Max. error for external halo/haloed elements " << max_error
8546 << std::endl;
8547 if (max_error > max_permitted_error_for_halo_check)
8548 {
8549 shout_and_terminate = true;
8550 oomph_info << "This is bigger than the permitted threshold "
8551 << max_permitted_error_for_halo_check << std::endl;
8552 oomph_info
8553 << "If you believe this to be acceptable for your problem\n"
8554 << "increase Problem::Max_permitted_error_for_halo_check and re-run \n";
8555 }
8556
8557 if (shout_and_terminate)
8558 {
8559 throw OomphLibError("Error in halo checking",
8560 OOMPH_CURRENT_FUNCTION,
8561 OOMPH_EXCEPTION_LOCATION);
8562 }
8563 }
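// The checks above are normally not invoked directly; a driver code would
// typically trigger them through the Problem interface after distributing
// the problem (the error message above refers to exactly that route). A
// minimal, hedged sketch: only the existence of Problem::check_halo_schemes()
// is implied by the message above, its precise signature is an assumption
// here.
//
//   DocInfo doc_info;
//   doc_info.set_directory("RESLT");      // directory must already exist
//   problem.distribute();                 // distribute mesh(es) over procs
//   problem.check_halo_schemes(doc_info); // writes error_*_check*.dat
//                                         // files if anything disagrees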
8564
8565
8566 //========================================================================
8567 /// Null out specified external halo node (used when deleting duplicates)
8568 //========================================================================
8569 void Mesh::null_external_halo_node(const unsigned& p, Node* nod_pt)
8570 {
8571 // Loop over all external halo nodes with specified processor
8572 Vector<Node*>& ext_halo_node_pt = External_halo_node_pt[p];
8573 unsigned n = ext_halo_node_pt.size();
8574 for (unsigned j = 0; j < n; j++)
8575 {
8576 if (ext_halo_node_pt[j] == nod_pt)
8577 {
8578 ext_halo_node_pt[j] = 0;
8579 break;
8580 }
8581 }
8582 }
8583
8584
8585 //========================================================================
8586 /// Consolidate external halo node storage by removing nulled out
8587 /// pointers in external halo and haloed schemes
8588 //========================================================================
8589 void Mesh::remove_null_pointers_from_external_halo_node_storage()
8590 {
8591 // Storage for number of processors and current processor
8592 int n_proc = Comm_pt->nproc();
8593 int my_rank = Comm_pt->my_rank();
8594
8595 // Loop over all (other) processors and store index of any nulled-out
8596 // external halo nodes in storage scheme.
8597
8598 // Data to be sent to each processor
8599 Vector<int> send_n(n_proc, 0);
8600
8601 // Storage for all values to be sent to all processors
8602 Vector<int> send_data;
8603
8604 // Start location within send_data for data to be sent to each processor
8605 Vector<int> send_displacement(n_proc, 0);
8606
8607 // Check missing ones
8608 for (int domain = 0; domain < n_proc; domain++)
8609 {
8610 // Set the offset for the current processor
8611 send_displacement[domain] = send_data.size();
8612
8613 // Don't bother to do anything if the processor in the loop is the
8614 // current processor
8615 if (domain != my_rank)
8616 {
8617 // Make backup of external halo node pointers with this domain
8618 Vector<Node*> backup_pt(External_halo_node_pt[domain]);
8619
8620 // Wipe
8621 External_halo_node_pt[domain].clear();
8622
8623 // How many do we have currently?
8624 unsigned nnod = backup_pt.size();
8625 External_halo_node_pt[domain].reserve(nnod);
8626
8627 // Loop over external halo nodes with this domain
8628 for (unsigned j = 0; j < nnod; j++)
8629 {
8630 // Get pointer to node
8631 Node* nod_pt = backup_pt[j];
8632
8633 // Has it been nulled out?
8634 if (nod_pt == 0)
8635 {
8636 // Save index of nulled out one
8637 send_data.push_back(j);
8638 }
8639 else
8640 {
8641 // Still alive: Copy across
8642 External_halo_node_pt[domain].push_back(nod_pt);
8643 }
8644 }
8645 }
8646
8647 // End of data
8648 send_data.push_back(-1);
8649
8650 // Find the number of data added to the vector
8651 send_n[domain] = send_data.size() - send_displacement[domain];
8652 }
8653
8654
8655 // Storage for the number of data to be received from each processor
8656 Vector<int> receive_n(n_proc, 0);
8657
8658 // Now send numbers of data to be sent between all processors
8659 MPI_Alltoall(
8660 &send_n[0], 1, MPI_INT, &receive_n[0], 1, MPI_INT, Comm_pt->mpi_comm());
8661
8662
8663 // We now prepare the data to be received
8664 // by working out the displacements from the received data
8665 Vector<int> receive_displacement(n_proc, 0);
8666 int receive_data_count = 0;
8667 for (int rank = 0; rank < n_proc; ++rank)
8668 {
8669 // Displacement is number of data received so far
8670 receive_displacement[rank] = receive_data_count;
8671 receive_data_count += receive_n[rank];
8672 }
8673
8674 // Now resize the receive buffer for all data from all processors
8675 // Make sure that it has a size of at least one
8676 if (receive_data_count == 0)
8677 {
8678 ++receive_data_count;
8679 }
8680 Vector<int> receive_data(receive_data_count);
8681
8682 // Make sure that the send buffer has size at least one
8683 // so that we don't get a segmentation fault
8684 if (send_data.size() == 0)
8685 {
8686 send_data.resize(1);
8687 }
8688
8689 // Now send the data between all the processors
8690 MPI_Alltoallv(&send_data[0],
8691 &send_n[0],
8692 &send_displacement[0],
8693 MPI_INT,
8694 &receive_data[0],
8695 &receive_n[0],
8696 &receive_displacement[0],
8697 MPI_INT,
8698 Comm_pt->mpi_comm());
8699
8700 // Now use the received data
8701 for (int send_rank = 0; send_rank < n_proc; send_rank++)
8702 {
8703 // Don't bother to do anything for the processor corresponding to the
8704 // current processor or if no data were received from this processor
8705 if ((send_rank != my_rank) && (receive_n[send_rank] != 0))
8706 {
8707 // Counter for the data within the large array
8708 unsigned count = receive_displacement[send_rank];
8709
8710 // Unpack until we reach "end of data" indicator (-1)
8711 while (true)
8712 {
8713 // Read next entry
8714 int next_one = receive_data[count++];
8715
8716 if (next_one == -1)
8717 {
8718 break;
8719 }
8720 else
8721 {
8722 // Null out the entry
8723 External_haloed_node_pt[send_rank][next_one] = 0;
8724 }
8725 }
8726
8727 // Make backup of external haloed node pointers with this domain
8728 Vector<Node*> backup_pt(External_haloed_node_pt[send_rank]);
8729
8730 // Wipe
8731 External_haloed_node_pt[send_rank].clear();
8732
8733 // How many do we have currently?
8734 unsigned nnod = backup_pt.size();
8735 External_haloed_node_pt[send_rank].reserve(nnod);
8736
8737 // Loop over external haloed nodes with this domain
8738 for (unsigned j = 0; j < nnod; j++)
8739 {
8740 // Get pointer to node
8741 Node* nod_pt = backup_pt[j];
8742
8743 // Has it been nulled out?
8744 if (nod_pt != 0)
8745 {
8746 // Still alive: Copy across
8747 External_haloed_node_pt[send_rank].push_back(nod_pt);
8748 }
8749 }
8750 }
8751
8752 } // End of data is received
8753 }
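// The function above uses the standard two-stage "counts, then payload"
// exchange. A minimal, self-contained sketch of the same idiom in plain MPI
// (outside oomph-lib; all names below are illustrative only):
//
//   std::vector<int> send_data;              // concatenated payload
//   std::vector<int> send_n(nproc, 0);       // #entries per target rank
//   std::vector<int> send_displ(nproc, 0);   // start of each rank's chunk
//   // ... fill send_data / send_n / send_displ ...
//
//   // Stage 1: every rank learns how much to expect from every other rank
//   std::vector<int> recv_n(nproc, 0);
//   MPI_Alltoall(&send_n[0], 1, MPI_INT, &recv_n[0], 1, MPI_INT, comm);
//
//   // Stage 2: size the receive buffer from the counts, then exchange
//   std::vector<int> recv_displ(nproc, 0);
//   int total = 0;
//   for (int p = 0; p < nproc; p++)
//   {
//     recv_displ[p] = total;
//     total += recv_n[p];
//   }
//   std::vector<int> recv_data(std::max(total, 1));
//   MPI_Alltoallv(&send_data[0], &send_n[0], &send_displ[0], MPI_INT,
//                 &recv_data[0], &recv_n[0], &recv_displ[0], MPI_INT, comm);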
8754
8755#endif
8756
8757
8758 // =================================================================
8759 /// Get the number of dof types in the mesh from the first element of the
8760 /// mesh. If MPI is on then also do some consistency checks between
8761 /// processors. \b Careful: Involves MPI Broadcasts and must therefore be
8762 /// called on all processors!
8763 // =================================================================
8764 unsigned Mesh::ndof_types() const
8765 {
8766 // Remains -1 if we don't have any elements on this processor.
8767 int int_ndof_types = -1;
8768 unsigned nel = nelement();
8769 if (nel > 0)
8770 {
8771 int_ndof_types = element_pt(0)->ndof_types();
8772#ifdef PARANOID
8773 // Check that every element in this mesh has the same number of
8774 // types of DOF.
8775 for (unsigned i = 1; i < nel; i++)
8776 {
8777 if (int_ndof_types != int(element_pt(i)->ndof_types()))
8778 {
8779 std::ostringstream error_message;
8780 error_message
8781 << "Every element in the mesh must have the same number of "
8782 << "types of DOF for ndof_types() to work\n"
8783 << "Element 0 has " << int_ndof_types << " DOF types\n"
8784 << "Element " << i << " [out of a total of " << nel << " ] has "
8785 << element_pt(i)->ndof_types() << " DOF types"
8786 << "Element types are: Element 0:" << typeid(*element_pt(0)).name()
8787 << "\n"
8788 << " Current Element :" << typeid(*element_pt(i)).name()
8789 << "\n";
8790 throw OomphLibError(error_message.str(),
8791 OOMPH_CURRENT_FUNCTION,
8792 OOMPH_EXCEPTION_LOCATION);
8793 }
8794 }
8795#endif
8796 }
8797
8798#ifdef OOMPH_HAS_MPI
8799
8800 // If mesh is distributed
8801 if (is_mesh_distributed())
8802 {
8803 // if more than one processor then
8804 // + ensure number of DOFs is consistent on each processor (PARANOID)
8805 // + ensure processors with no elements in this mesh have the
8806 // correct number of DOF types
8807 if (Comm_pt->nproc() > 1)
8808 {
8809 unsigned nproc = Comm_pt->nproc();
8810 unsigned my_rank = Comm_pt->my_rank();
8811
8812 // Collect on root the number of dofs types determined independently
8813 // on all processors (-1 indicates that the processor didn't have
8814 // any elements and therefore doesn't know!)
8815 int* ndof_types_recv = 0;
8816 if (my_rank == 0)
8817 {
8818 ndof_types_recv = new int[nproc];
8819 }
8820
8821 MPI_Gather(&int_ndof_types,
8822 1,
8823 MPI_INT,
8824 ndof_types_recv,
8825 1,
8826 MPI_INT,
8827 0,
8828 Comm_pt->mpi_comm());
8829
8830 // Root: Update own number of dof types, check consistency amongst
8831 // all processors (in paranoid mode) and send out the actual
8832 // number of dof types to those processors who couldn't figure this
8833 // out themselves
8834 if (my_rank == 0)
8835 {
8836 // Check number of types of all non-root processors
8837 for (unsigned p = 1; p < nproc; p++)
8838 {
8839 if (ndof_types_recv[p] != -1)
8840 {
8841 // Processor p was able to figure out how many
8842 // dof types there are, so I root can update
8843 // its own (if required)
8844 if (int_ndof_types == -1)
8845 {
8846 int_ndof_types = ndof_types_recv[p];
8847 }
8848#ifdef PARANOID
8849 // Check consistency
8850 else if (int_ndof_types != ndof_types_recv[p])
8851 {
8852 std::ostringstream error_message;
8853 error_message
8854 << "The elements in this mesh must have the same number "
8855 << "of types of DOF on each processor";
8856 for (unsigned p = 0; p < nproc; p++)
8857 {
8858 if (ndof_types_recv[p] != -1)
8859 {
8860 error_message << "Processor " << p << " : "
8861 << ndof_types_recv[p] << "\n";
8862 }
8863 else
8864 {
8865 error_message << "Processor " << p << " : (no elements)\n";
8866 }
8867 }
8868 throw OomphLibError(error_message.str(),
8869 OOMPH_CURRENT_FUNCTION,
8870 OOMPH_EXCEPTION_LOCATION);
8871 }
8872#endif
8873 }
8874 }
8875
8876 // Now send ndof types to non-root processors that don't have it
8877 for (unsigned p = 1; p < nproc; p++)
8878 {
8879 if (ndof_types_recv[p] == -1)
8880 {
8881 MPI_Send(&int_ndof_types, 1, MPI_INT, p, 0, Comm_pt->mpi_comm());
8882 }
8883 }
8884 // clean up
8885 delete[] ndof_types_recv;
8886 }
8887 // "else if": "else" for non-root; "if" for checking if current
8888 // (non-root) processor does not know ndof type and is therefore
8889 // about to receive it from root.
8890 else if (int_ndof_types == -1)
8891 {
8892 MPI_Recv(&int_ndof_types,
8893 1,
8894 MPI_INT,
8895 0,
8896 0,
8897 Comm_pt->mpi_comm(),
8898 MPI_STATUS_IGNORE);
8899 }
8900 }
8901 }
8902#endif
8903
8904 // If int_ndof_types is still -1 then no elements were found for this mesh,
8905 // so it has no dofs.
8906 if (int_ndof_types == -1) int_ndof_types = 0;
8907
8908 return unsigned(int_ndof_types);
8909 }
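// Note: when the mesh is distributed, ndof_types() performs an MPI gather
// and point-to-point sends, so it must be called on all processors, even on
// those that hold no elements of this mesh. A minimal, hedged usage sketch
// (mesh_pt is assumed to point to a fully built, possibly distributed mesh):
//
//   unsigned n_dof_types = mesh_pt->ndof_types();          // collective
//   unsigned e_dim       = mesh_pt->elemental_dimension(); // collective
//   unsigned n_dim       = mesh_pt->nodal_dimension();     // collective
//
// elemental_dimension() and nodal_dimension() below follow the same
// gather-check-broadcast pattern and carry the same requirement.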
8910
8911 // =================================================================
8912 /// Get the number of elemental dimension in the mesh from the first
8913 /// element of the mesh. If MPI is on then also do some consistency
8914 /// checks between processors. \b Careful: Involves MPI Broadcasts
8915 /// and must therefore be called on all processors!
8916 // =================================================================
8917 unsigned Mesh::elemental_dimension() const
8918 {
8919 // Remains -1 if we don't have any elements on this processor.
8920 int int_dim = -1;
8921 if (nelement() > 0)
8922 {
8923 int_dim = finite_element_pt(0)->dim();
8924#ifdef PARANOID
8925 // Check that every element in this mesh has the same number of
8926 // types of elemental dimension.
8927 for (unsigned i = 1; i < nelement(); i++)
8928 {
8929 if (int_dim != int(finite_element_pt(i)->dim()))
8930 {
8931 std::ostringstream error_message;
8932 error_message
8933 << "Every element in the mesh must have the same number of "
8934 << "elemental dimension for elemental_dimension() to work.\n"
8935 << "Element 0 has elemental dimension " << int_dim << "\n"
8936 << "Element " << i << " has elemental dimension "
8937 << finite_element_pt(i)->dim() << ".";
8938 throw OomphLibError(error_message.str(),
8939 OOMPH_CURRENT_FUNCTION,
8940 OOMPH_EXCEPTION_LOCATION);
8941 }
8942 }
8943#endif
8944 }
8945
8946#ifdef OOMPH_HAS_MPI
8947
8948 // If mesh is distributed
8949 if (Comm_pt != 0)
8950 {
8951 // if more than one processor then
8952 // + ensure dimension number is consistent on each processor (PARANOID)
8953 // + ensure processors with no elements in this mesh have the
8954 // correct dimension number.
8955 if (Comm_pt->nproc() > 1)
8956 {
8957 unsigned nproc = Comm_pt->nproc();
8958 unsigned my_rank = Comm_pt->my_rank();
8959
8960 // Collect on root the dimension number determined independently
8961 // on all processors (-1 indicates that the processor didn't have
8962 // any elements and therefore doesn't know!)
8963 int* dim_recv = 0;
8964 if (my_rank == 0)
8965 {
8966 dim_recv = new int[nproc];
8967 }
8968
8969 MPI_Gather(
8970 &int_dim, 1, MPI_INT, dim_recv, 1, MPI_INT, 0, Comm_pt->mpi_comm());
8971
8972 // Root: Update own dimension, check consistency amongst
8973 // all processors (in paranoid mode) and send out the actual
8974 // dimension number to those processors who couldn't figure this
8975 // out themselves
8976 if (my_rank == 0)
8977 {
8978 // Check number of types of all non-root processors
8979 for (unsigned p = 1; p < nproc; p++)
8980 {
8981 if (dim_recv[p] != -1)
8982 {
8983 // Processor p was able to figure out the elemental
8984 // dimension, so I root can update
8985 // its own (if required)
8986 if (int_dim == -1)
8987 {
8988 int_dim = dim_recv[p];
8989 }
8990#ifdef PARANOID
8991 // Check consistency
8992 else if (int_dim != dim_recv[p])
8993 {
8994 std::ostringstream error_message;
8995 error_message
8996 << "The elements in this mesh must have the same elemental "
8997 << "dimension number on each processor";
8998 for (unsigned p = 0; p < nproc; p++)
8999 {
9000 if (dim_recv[p] != -1)
9001 {
9002 error_message << "Processor " << p << " : " << dim_recv[p]
9003 << "\n";
9004 }
9005 else
9006 {
9007 error_message << "Processor " << p << " : (no elements)\n";
9008 }
9009 }
9010 throw OomphLibError(error_message.str(),
9011 OOMPH_CURRENT_FUNCTION,
9012 OOMPH_EXCEPTION_LOCATION);
9013 }
9014#endif
9015 }
9016 }
9017
9018 // Now send the elemental dimension to non-root processors that
9019 // don't have it
9020 for (unsigned p = 1; p < nproc; p++)
9021 {
9022 if (dim_recv[p] == -1)
9023 {
9024 MPI_Send(&int_dim, 1, MPI_INT, p, 0, Comm_pt->mpi_comm());
9025 }
9026 }
9027 // clean up
9028 delete[] dim_recv;
9029 }
9030 // "else if": "else" for non-root; "if" for checking if current
9031 // (non-root) processor does not know elemental dimension and is
9032 // therefore about to receive it from root.
9033 else if (int_dim == -1)
9034 {
9035 MPI_Recv(
9036 &int_dim, 1, MPI_INT, 0, 0, Comm_pt->mpi_comm(), MPI_STATUS_IGNORE);
9037 }
9038 }
9039 }
9040#endif
9041
9042 // If int_dim is still -1 then no elements were found for this mesh, so it
9043 // has no elemental dimension.
9044 if (int_dim == -1) int_dim = 0;
9045
9046 return unsigned(int_dim);
9047 }
9048
9049 // =================================================================
9050 /// Get the number of nodal dimension in the mesh from the first
9051 /// element of the mesh. If MPI is on then also do some consistency
9052 /// checks between processors. \b Careful: Involves MPI Broadcasts
9053 /// and must therefore be called on all processors!
9054 // =================================================================
9055 unsigned Mesh::nodal_dimension() const
9056 {
9057 // Remains -1 if we don't have any elements on this processor.
9058 int int_dim = -1;
9059 if (nelement() > 0)
9060 {
9061 int_dim = finite_element_pt(0)->nodal_dimension();
9062#ifdef PARANOID
9063 // Check that every element in this mesh has the same number of
9064 // types of nodal dimension.
9065 for (unsigned i = 1; i < nelement(); i++)
9066 {
9067 if (int_dim != int(finite_element_pt(i)->nodal_dimension()))
9068 {
9069 std::ostringstream error_message;
9070 error_message
9071 << "Every element in the mesh must have the same number of "
9072 << "nodal dimension for nodal_dimension() to work.\n"
9073 << "Element 0 has nodal dimension " << int_dim << "\n"
9074 << "Element " << i << " has nodal dimension "
9075 << finite_element_pt(i)->nodal_dimension() << ".";
9076 throw OomphLibError(error_message.str(),
9077 OOMPH_CURRENT_FUNCTION,
9078 OOMPH_EXCEPTION_LOCATION);
9079 }
9080 }
9081#endif
9082 }
9083
9084#ifdef OOMPH_HAS_MPI
9085
9086 // If mesh is distributed
9087 if (Comm_pt != 0)
9088 {
9089 // if more than one processor then
9090 // + ensure dimension number is consistent on each processor (PARANOID)
9091 // + ensure processors with no elements in this mesh have the
9092 // correct dimension number.
9093 if (Comm_pt->nproc() > 1)
9094 {
9095 unsigned nproc = Comm_pt->nproc();
9096 unsigned my_rank = Comm_pt->my_rank();
9097
9098 // Collect on root the dimension number determined independently
9099 // on all processors (-1 indicates that the processor didn't have
9100 // any elements and therefore doesn't know!)
9101 int* dim_recv = 0;
9102 if (my_rank == 0)
9103 {
9104 dim_recv = new int[nproc];
9105 }
9106
9107 MPI_Gather(
9108 &int_dim, 1, MPI_INT, dim_recv, 1, MPI_INT, 0, Comm_pt->mpi_comm());
9109
9110 // Root: Update own dimension, check consistency amongst
9111 // all processors (in paranoid mode) and send out the actual
9112 // dimension number to those processors who couldn't figure this
9113 // out themselves
9114 if (my_rank == 0)
9115 {
9116 // Check number of types of all non-root processors
9117 for (unsigned p = 1; p < nproc; p++)
9118 {
9119 if (dim_recv[p] != -1)
9120 {
9121 // Processor p was able to figure out the nodal
9122 // dimension, so I root can update
9123 // its own (if required)
9124 if (int_dim == -1)
9125 {
9126 int_dim = dim_recv[p];
9127 }
9128#ifdef PARANOID
9129 // Check consistency
9130 else if (int_dim != dim_recv[p])
9131 {
9132 std::ostringstream error_message;
9133 error_message
9134 << "The elements in this mesh must have the same nodal "
9135 << "dimension number on each processor";
9136 for (unsigned p = 0; p < nproc; p++)
9137 {
9138 if (dim_recv[p] != -1)
9139 {
9140 error_message << "Processor " << p << " : " << dim_recv[p]
9141 << "\n";
9142 }
9143 else
9144 {
9145 error_message << "Processor " << p << " : (no elements)\n";
9146 }
9147 }
9148 throw OomphLibError(error_message.str(),
9149 OOMPH_CURRENT_FUNCTION,
9150 OOMPH_EXCEPTION_LOCATION);
9151 }
9152#endif
9153 }
9154 }
9155
9156 // Now send the nodal dimension to non-root processors that
9157 // don't have it
9158 for (unsigned p = 1; p < nproc; p++)
9159 {
9160 if (dim_recv[p] == -1)
9161 {
9162 MPI_Send(&int_dim, 1, MPI_INT, p, 0, Comm_pt->mpi_comm());
9163 }
9164 }
9165 // clean up
9166 delete[] dim_recv;
9167 }
9168 // "else if": "else" for non-root; "if" for checking if current
9169 // (non-root) processor does not know nodal dimension and is therefore
9170 // about to receive it from root.
9171 else if (int_dim == -1)
9172 {
9173 MPI_Recv(
9174 &int_dim, 1, MPI_INT, 0, 0, Comm_pt->mpi_comm(), MPI_STATUS_IGNORE);
9175 }
9176 }
9177 }
9178#endif
9179
9180 // If int_dim is still -1 then no elements were found for this mesh, so it
9181 // has no nodal dimension.
9182 if (int_dim == -1) int_dim = 0;
9183
9184 return unsigned(int_dim);
9185 }
9186
9187 //========================================================================
9188 /// Wipe the storage for all externally-based elements and delete halos
9189 //========================================================================
9190 void Mesh::delete_all_external_storage()
9191 {
9192#ifdef OOMPH_HAS_MPI
9193
9194 // Only do for distributed meshes
9195 if (is_mesh_distributed())
9196 {
9197 // Some of the external halo/haloed nodes are masters of nodes
9198 // in this mesh. We must set to be non-hanging any nodes whose
9199 // masters we are about to delete, to remove any dependencies.
9200
9201 // Loop over all the mesh nodes and check their masters
9202 for (unsigned i = 0; i < nnode(); i++)
9203 {
9204 // Get pointer to the node
9205 Node* nod_pt = node_pt(i);
9206
9207 // Check if the node exists
9208 if (nod_pt != 0)
9209 {
9210 // Check if the node is hanging
9211 if (nod_pt->is_hanging())
9212 {
9213 // Get pointer to the hang info
9214 HangInfo* hang_pt = nod_pt->hanging_pt();
9215
9216 // Check if any master is in the external halo storage
9217 // External haloed nodes don't get deleted, so we don't need to
9218 //(and shouldn't) un-hang their dependents
9219 bool found_master_in_external_halo_storage = false;
9220 for (unsigned m = 0; m < hang_pt->nmaster(); m++)
9221 {
9222 // Iterator for vector of nodes
9223 Vector<Node*>::iterator it;
9224
9225 // Loop over external halo storage with all processors
9226 bool master_is_in_external_halo_storage = false;
9227 for (int d = 0; d < Comm_pt->nproc(); d++)
9228 {
9229 // Find master in map of external halo nodes
9230 it = std::find(External_halo_node_pt[d].begin(),
9231 External_halo_node_pt[d].end(),
9232 hang_pt->master_node_pt(m));
9233
9234 // Check if it was found
9235 if (it != External_halo_node_pt[d].end())
9236 {
9237 // Mark as found
9238 master_is_in_external_halo_storage = true;
9239 // Don't need to search remaining processors
9240 break;
9241 }
9242 }
9243
9244 // Check if any have been found
9245 if (master_is_in_external_halo_storage)
9246 {
9247 // Mark as found
9248 found_master_in_external_halo_storage = true;
9249 // Don't need to search remaining masters
9250 break;
9251 }
9252 }
9253
9254 // If it was found...
9255 if (found_master_in_external_halo_storage)
9256 {
9257 // Master is in external halo storage and is about to be deleted,
9258 // so we'd better make this node non-hanging. In case the node
9259 // does not become hanging again, we must get all the required
9260 // information from its masters to make it a 'proper' node again.
9261
9262 // Reconstruct the nodal values/position from the node's
9263 // hanging node representation
9264 unsigned nt = nod_pt->ntstorage();
9265 unsigned n_value = nod_pt->nvalue();
9266 Vector<double> values(n_value);
9267 unsigned n_dim = nod_pt->ndim();
9268 Vector<double> position(n_dim);
9269 // Loop over all history values
9270 for (unsigned t = 0; t < nt; t++)
9271 {
9272 nod_pt->value(t, values);
9273 for (unsigned i = 0; i < n_value; i++)
9274 {
9275 nod_pt->set_value(t, i, values[i]);
9276 }
9277 nod_pt->position(t, position);
9278 for (unsigned i = 0; i < n_dim; i++)
9279 {
9280 nod_pt->x(t, i) = position[i];
9281 }
9282 }
9283
9284 // If it's an algebraic node: Update its previous nodal positions
9285 // too
9286 AlgebraicNode* alg_node_pt = dynamic_cast<AlgebraicNode*>(nod_pt);
9287 if (alg_node_pt != 0)
9288 {
9289 bool update_all_time_levels = true;
9290 alg_node_pt->node_update(update_all_time_levels);
9291 }
9292
9293
9294 // If it's a Solid node, update Lagrangian coordinates
9295 // from its hanging node representation
9296 SolidNode* solid_node_pt = dynamic_cast<SolidNode*>(nod_pt);
9297 if (solid_node_pt != 0)
9298 {
9299 unsigned n_lagrangian = solid_node_pt->nlagrangian();
9300 for (unsigned i = 0; i < n_lagrangian; i++)
9301 {
9302 solid_node_pt->xi(i) = solid_node_pt->lagrangian_position(i);
9303 }
9304 }
9305
9306 // No need to worry about geometrically hanging nodes
9307 // on boundaries (as in (p_)adapt_mesh())
9308 /// /Now store geometrically hanging nodes on boundaries that
9309 /// /may need updating after refinement.
9310 /// /There will only be a problem if we have 3 spatial dimensions
9311 // if((mesh_dim > 2) && (nod_pt->is_hanging()))
9312 // {
9313 // //If the node is on a boundary then add a pointer to the node
9314 // //to our lookup scheme
9315 // if(nod_pt->is_on_boundary())
9316 // {
9317 // //Storage for the boundaries on which the Node is located
9318 // std::set<unsigned>* boundaries_pt;
9319 // nod_pt->get_boundaries_pt(boundaries_pt);
9320 // if(boundaries_pt!=0)
9321 // {
9322 // //Loop over the boundaries and add a pointer to the node
9323 // //to the appropriate storage scheme
9324 // for(std::set<unsigned>::iterator
9325 // it=boundaries_pt->begin();
9326 // it!=boundaries_pt->end();++it)
9327 // {
9328 // hanging_nodes_on_boundary_pt[*it].insert(nod_pt);
9329 // }
9330 // }
9331 // }
9332 // }
9333
9334 // Finally set nonhanging
9335 nod_pt->set_nonhanging();
9336 }
9337 }
9338 }
9339 else
9340 {
9341 // Node doesn't exist!
9342 }
9343 }
9344 }
9345
9346 // Careful: some of the external halo nodes are also in boundary
9347 // node storage and should be removed from this first
9348 for (std::map<unsigned, Vector<Node*>>::iterator it =
9349 External_halo_node_pt.begin();
9350 it != External_halo_node_pt.end();
9351 it++)
9352 {
9353 // Processor ID
9354 int d = (*it).first;
9355
9356 // How many external haloes with this process?
9357 unsigned n_ext_halo_nod = nexternal_halo_node(d);
9358 for (unsigned j = 0; j < n_ext_halo_nod; j++)
9359 {
9360
9361 unsigned n_bnd = nboundary();
9362 for (unsigned i_bnd = 0; i_bnd < n_bnd; i_bnd++)
9363 {
9364 // Call this for all boundaries; it will do nothing
9365 // if the node is not on the current boundary
9366 remove_boundary_node(i_bnd, External_halo_node_pt[d][j]);
9367 }
9368 }
9369 }
9370
9371 // A loop to delete external halo nodes
9372 for (std::map<unsigned, Vector<Node*>>::iterator it =
9373 External_halo_node_pt.begin();
9374 it != External_halo_node_pt.end();
9375 it++)
9376 {
9377 // Processor ID
9378 int d = (*it).first;
9379
9380 unsigned n_ext_halo_nod = nexternal_halo_node(d);
9381 for (unsigned j = 0; j < n_ext_halo_nod; j++)
9382 {
9383 // Only delete if it's not a node stored in the current mesh
9384 bool is_a_mesh_node = false;
9385 unsigned n_node = nnode();
9386 for (unsigned jj = 0; jj < n_node; jj++)
9387 {
9388 if (Node_pt[jj] == External_halo_node_pt[d][j])
9389 {
9390 is_a_mesh_node = true;
9391 }
9392 }
9393
9394 // There will also be duplications between multiple processors,
9395 // so make sure that we don't try to delete these twice
9396 if (!is_a_mesh_node)
9397 {
9398 // Loop over all other higher-numbered processors and check
9399 // for duplicated external halo nodes
9400 // (The highest numbered processor should delete all its ext halos)
9401 for (std::map<unsigned, Vector<Node*>>::iterator itt =
9402 External_halo_node_pt.begin();
9403 itt != External_halo_node_pt.end();
9404 itt++)
9405 {
9406 // Processor ID
9407 int dd = (*itt).first;
9408
9409 if (dd > d)
9410 {
9411 unsigned n_ext_halo = nexternal_halo_node(dd);
9412 for (unsigned jjj = 0; jjj < n_ext_halo; jjj++)
9413 {
9414 if (External_halo_node_pt[dd][jjj] ==
9415 External_halo_node_pt[d][j])
9416 {
9417 is_a_mesh_node = true;
9418 }
9419 }
9420 }
9421 }
9422 }
9423
9424 // Only now if no duplicates exist can the node be safely deleted
9425 if (!is_a_mesh_node)
9426 {
9427 delete External_halo_node_pt[d][j];
9428 }
9429 }
9430 }
9431
9432 // Another loop to delete external halo elements (which are distinct)
9433 for (std::map<unsigned, Vector<GeneralisedElement*>>::iterator it =
9434 External_halo_element_pt.begin();
9435 it != External_halo_element_pt.end();
9436 it++)
9437 {
9438 // Processor ID
9439 int d = (*it).first;
9440
9441 unsigned n_ext_halo_el = nexternal_halo_element(d);
9442 for (unsigned e = 0; e < n_ext_halo_el; e++)
9443 {
9444 delete External_halo_element_pt[d][e];
9445 }
9446 }
9447
9448 // Now we are okay to clear the external halo node storage
9449 External_halo_node_pt.clear();
9450 External_halo_element_pt.clear();
9451
9452 // External haloed nodes and elements are actual members
9453 // of the external mesh and should not be deleted
9454 External_haloed_node_pt.clear();
9455 External_haloed_element_pt.clear();
9456#endif
9457 }
9458
9459
9460#ifdef OOMPH_HAS_MPI
9461
9462 // NOTE: the add_external_haloed_node_pt and add_external_haloed_element_pt
9463 // functions need to check whether the Node/FiniteElement argument
9464 // has been added to the storage already; this is not the case
9465 // for the add_external_halo_node_pt and add_external_halo_element_pt
9466 // functions as these are newly-created elements that are created and
9467 // added to the storage based on the knowledge of when their haloed
9468 // counterparts were created and whether they were newly added
9469
9470 //========================================================================
9471 /// Add external haloed element whose non-halo counterpart is held
9472 /// on processor p to the storage scheme for external haloed elements.
9473 /// If the element is already in the storage scheme then return its index
9474 //========================================================================
9475 unsigned Mesh::add_external_haloed_element_pt(const unsigned& p,
9476 GeneralisedElement*& el_pt)
9477 {
9478 // Loop over current storage
9479 unsigned n_extern_haloed = nexternal_haloed_element(p);
9480
9481 // Is this already an external haloed element?
9482 bool is_an_external_haloed_element = false;
9483 unsigned external_haloed_el_index = 0;
9484 for (unsigned eh = 0; eh < n_extern_haloed; eh++)
9485 {
9486 if (External_haloed_element_pt[p][eh] == el_pt)
9487 {
9488 // It's already there, so...
9489 is_an_external_haloed_element = true;
9490 // ...set the index of this element
9491 external_haloed_el_index = eh;
9492 break;
9493 }
9494 }
9495
9496 // Has it been found?
9497 if (!is_an_external_haloed_element)
9498 {
9499 // Not found, so add it:
9500 External_haloed_element_pt[p].push_back(el_pt);
9501 // Return the index where it's just been added
9502 return n_extern_haloed;
9503 }
9504 else
9505 {
9506 // Return the index where it was found
9507 return external_haloed_el_index;
9508 }
9509 }
9510
9511 //========================================================================
9512 /// Add external haloed node whose halo (external) counterpart
9513 /// is held on processor p to the storage scheme for external haloed nodes.
9514 /// If the node is already in the storage scheme then return its index
9515 //========================================================================
9516 unsigned Mesh::add_external_haloed_node_pt(const unsigned& p, Node*& nod_pt)
9517 {
9518 // Loop over current storage
9519 unsigned n_ext_haloed_nod = nexternal_haloed_node(p);
9520
9521 // Is this already an external haloed node?
9522 bool is_an_external_haloed_node = false;
9523 unsigned external_haloed_node_index = 0;
9524 for (unsigned k = 0; k < n_ext_haloed_nod; k++)
9525 {
9526 if (External_haloed_node_pt[p][k] == nod_pt)
9527 {
9528 is_an_external_haloed_node = true;
9529 external_haloed_node_index = k;
9530 break;
9531 }
9532 }
9533
9534 // Has it been found?
9535 if (!is_an_external_haloed_node)
9536 {
9537 // Not found, so add it
9538 External_haloed_node_pt[p].push_back(nod_pt);
9539 // Return the index where it's just been added
9540 return n_ext_haloed_nod;
9541 }
9542 else
9543 {
9544 // Return the index where it was found
9545 return external_haloed_node_index;
9546 }
9547 }
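// Both add_external_haloed_*_pt() functions above implement the same
// "check, then add" idiom: the returned index identifies the entry in the
// per-processor storage whether or not it was newly added. A hedged sketch
// of the calling pattern (the communication step is only indicated, not
// implemented):
//
//   // On the processor that holds the non-halo (haloed) object:
//   unsigned index = mesh_pt->add_external_haloed_element_pt(p, el_pt);
//   // ... send 'index' to processor p, which stores the matching external
//   // halo element at the same position in its own lookup scheme ...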
9548
9549#endif
9550
9551
9552 //////////////////////////////////////////////////////////////////////
9553 //////////////////////////////////////////////////////////////////////
9554 // Functions for solid meshes
9555 //////////////////////////////////////////////////////////////////////
9556 //////////////////////////////////////////////////////////////////////
9557
9558
9559 //========================================================================
9560 /// Make the current configuration the undeformed one by
9561 /// setting the nodal Lagrangian coordinates to their current
9562 /// Eulerian ones
9563 //========================================================================
9564 void SolidMesh::set_lagrangian_nodal_coordinates()
9565 {
9566 // Find out how many nodes there are
9567 unsigned long n_node = nnode();
9568
9569 // Loop over all the nodes
9570 for (unsigned n = 0; n < n_node; n++)
9571 {
9572 // Cast node to solid node (can safely be done because
9573 // SolidMeshes consist of SolidNodes)
9574 SolidNode* node_pt = static_cast<SolidNode*>(Node_pt[n]);
9575
9576 // Number of Lagrangian coordinates
9577 unsigned n_lagrangian = node_pt->nlagrangian();
9578
9579 // Number of generalised Lagrangian coordinates
9580 unsigned n_lagrangian_type = node_pt->nlagrangian_type();
9581
9582 // The assumption here is that there must be fewer lagrangian coordinates
9583 // than eulerian (which must be true?)
9584 // Set (generalised) Lagrangian coords = (generalised) Eulerian coords
9585 for (unsigned k = 0; k < n_lagrangian_type; k++)
9586 {
9587 // Loop over lagrangian coordinates and set their values
9588 for (unsigned j = 0; j < n_lagrangian; j++)
9589 {
9590 node_pt->xi_gen(k, j) = node_pt->x_gen(k, j);
9591 }
9592 }
9593 }
9594 }
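// A typical (hedged) use of set_lagrangian_nodal_coordinates(): after the
// nodes of a SolidMesh have been moved into a desired shape, calling it
// turns that shape into the stress-free reference configuration:
//
//   solid_mesh_pt->node_update();                      // move the nodes
//   solid_mesh_pt->set_lagrangian_nodal_coordinates(); // declare current
//                                                      // shape undeformed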
9595
9596
9597 //=======================================================================
9598 /// Static problem that can be used to assign initial conditions
9599 /// on a given mesh.
9600 //=======================================================================
9601 SolidICProblem SolidMesh::Solid_IC_problem;
9602
9603
9604 ///////////////////////////////////////////////////////////////////////
9605 ///////////////////////////////////////////////////////////////////////
9606 ///////////////////////////////////////////////////////////////////////
9607
9608
9609 //=================================================================
9610 /// Namespace for paraview-style output helper functions
9611 //=================================================================
9612 namespace ParaviewHelper
9613 {
9614 /// Write the pvd file header
9615 void write_pvd_header(std::ofstream& pvd_file)
9616 {
9617 pvd_file << "<?xml version=\"1.0\"?>" << std::endl
9618 << "<VTKFile type=\"Collection\" version=\"0.1\">" << std::endl
9619 << "<Collection>" << std::endl;
9620 }
9621
9622 /// Add name of output file and associated continuous time
9623 /// to pvd file.
9624 void write_pvd_information(std::ofstream& pvd_file,
9625 const std::string& output_filename,
9626 const double& time)
9627 {
9628 // Output the actual time values
9629 pvd_file << "<DataSet timestep=\"" << time << "\" ";
9630
9631 // Apparently this has to go in
9632 pvd_file << "part=\"0\" ";
9633
9634 // Add the name of the file, so that the pvd file knows what it is called
9635 pvd_file << "file=\"" << output_filename << "\"/>" << std::endl;
9636 }
9637
9638 /// Write the pvd file footer
9639 void write_pvd_footer(std::ofstream& pvd_file)
9640 {
9641 pvd_file << "</Collection>" << std::endl << "</VTKFile>";
9642 }
9643
9644 } // namespace ParaviewHelper
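// A minimal sketch of how the three helpers above combine when writing a
// ParaView collection (.pvd) file over a time loop (file names, nstep and
// the time value are illustrative only):
//
//   std::ofstream pvd_file("RESLT/soln.pvd");
//   ParaviewHelper::write_pvd_header(pvd_file);
//   for (unsigned i = 0; i < nstep; i++)
//   {
//     // ... advance the solution and write soln<i>.vtu for this step ...
//     ParaviewHelper::write_pvd_information(
//       pvd_file, "soln" + std::to_string(i) + ".vtu", time);
//   }
//   ParaviewHelper::write_pvd_footer(pvd_file);
//   pvd_file.close();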
9645
9646 ////////////////////////////////////////////////////////////////
9647 ////////////////////////////////////////////////////////////////
9648 ////////////////////////////////////////////////////////////////
9649
9650
9651} // namespace oomph
e
Definition cfortran.h:571
static char t char * s
Definition cfortran.h:568
cstr elem_len * i
Definition cfortran.h:603
char t
Definition cfortran.h:568
Algebraic nodes are nodes with an algebraic positional update function.
A class that contains the information required by Nodes that are located on Mesh boundaries....
Definition nodes.h:1996
GeneralisedTimestepper used to store the arclength derivatives and pervious solutions required in con...
A Class for the derivatives of shape functions The class design is essentially the same as Shape,...
Definition shape.h:278
TimeStepper *& time_stepper_pt()
Return the pointer to the timestepper.
Definition nodes.h:238
bool does_pointer_correspond_to_value(double *const &parameter_pt)
Check whether the pointer parameter_pt addresses internal data values.
Definition nodes.cc:568
void dump(std::ostream &dump_file) const
Dump the data object to a file.
Definition nodes.cc:645
void set_halo(const unsigned &non_halo_proc_ID)
Label the node as halo and specify processor that holds non-halo counterpart.
Definition nodes.h:520
void set_time_stepper(TimeStepper *const &time_stepper_pt, const bool &preserve_existing_data)
Set a new timestepper by resizing the appropriate storage. If already assigned the equation numbering...
Definition nodes.cc:406
void read(std::ifstream &restart_file)
Read data object from a file.
Definition nodes.cc:672
Information for documentation of results: Directory and file number to enable output in the form RESL...
std::string & label()
String used (e.g.) for labeling output files.
bool is_doc_enabled() const
Are we documenting?
std::string directory() const
Output directory.
unsigned & number()
Number used (e.g.) for labeling output files.
A general Finite Element class.
Definition elements.h:1317
virtual void scalar_value_paraview(std::ofstream &file_out, const unsigned &i, const unsigned &nplot) const
Write values of the i-th scalar field at the plot points. Broken virtual. Needs to be implemented for...
Definition elements.h:3005
virtual unsigned nplot_points_paraview(const unsigned &nplot) const
Return the number of actual plot points for paraview plot with parameter nplot. Broken virtual; can b...
Definition elements.h:2866
Integral *const & integral_pt() const
Return the pointer to the integration scheme (const version)
Definition elements.h:1967
void position(const Vector< double > &zeta, Vector< double > &r) const
Return the parametrised position of the FiniteElement in its incarnation as a GeomObject,...
Definition elements.h:2680
virtual void local_coordinate_of_node(const unsigned &j, Vector< double > &s) const
Get local coordinates of node j in the element; vector sets its own size (broken virtual)
Definition elements.h:1846
virtual void output(std::ostream &outfile)
Output the element data — typically the values at the nodes in a format suitable for post-processing.
Definition elements.h:3054
double size() const
Calculate the size of the element (length, area, volume,...) in Eulerian computational coordinates....
Definition elements.cc:4320
virtual std::string scalar_name_paraview(const unsigned &i) const
Name of the i-th scalar field. Default implementation returns V1 for the first one,...
Definition elements.h:3047
void check_jacobian(const double &jacobian) const
Helper function used to check for singular or negative Jacobians in the transform from local to globa...
Definition elements.cc:1778
unsigned dim() const
Return the spatial dimension of the element, i.e. the number of local coordinates required to paramet...
Definition elements.h:2615
unsigned nnode() const
Return the number of nodes.
Definition elements.h:2214
virtual void write_paraview_type(std::ofstream &file_out, const unsigned &nplot) const
Return the paraview element type. Broken virtual. Needs to be implemented for each new geometric elem...
Definition elements.h:2968
void(* SteadyExactSolutionFctPt)(const Vector< double > &, Vector< double > &)
Function pointer for function that computes vector-valued steady "exact solution" as .
Definition elements.h:1763
void get_x(const Vector< double > &s, Vector< double > &x) const
Global coordinates as function of local coordinates. Either via FE representation or via macro-elemen...
Definition elements.h:1889
virtual unsigned nscalar_paraview() const
Number of scalars/fields output by this element. Broken virtual. Needs to be implemented for each new...
Definition elements.h:2992
virtual Node * construct_boundary_node(const unsigned &n)
Construct the local node n as a boundary node; that is a node that MAY be placed on a mesh boundary a...
Definition elements.h:2542
double dshape_eulerian(const Vector< double > &s, Shape &psi, DShape &dpsidx) const
Compute the geometric shape functions and also first derivatives w.r.t. global coordinates at local c...
Definition elements.cc:3328
Node *& node_pt(const unsigned &n)
Return a pointer to the local node n.
Definition elements.h:2179
virtual unsigned nsub_elements_paraview(const unsigned &nplot) const
Return the number of local sub-elements for paraview plot with parameter nplot. Broken virtual; can b...
Definition elements.h:2880
virtual void describe_local_dofs(std::ostream &out, const std::string &current_string) const
Function to describe the local dofs of the element[s]. The ostream specifies the output stream to whi...
Definition elements.cc:1737
unsigned nodal_dimension() const
Return the required Eulerian dimension of the nodes in this element.
Definition elements.h:2488
virtual void node_update()
Update the positions of all nodes in the element using each node update function. The default impleme...
Definition elements.cc:5102
virtual void scalar_value_fct_paraview(std::ofstream &file_out, const unsigned &i, const unsigned &nplot, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt) const
Write values of the i-th scalar field at the plot points. Broken virtual. Needs to be implemented for...
Definition elements.h:3017
void output_paraview(std::ofstream &file_out, const unsigned &nplot) const
Paraview output – this outputs the coordinates at the plot points (for parameter nplot) to specified ...
Definition elements.h:2893
void(* UnsteadyExactSolutionFctPt)(const double &, const Vector< double > &, Vector< double > &)
Function pointer for function that computes Vector-valued time-dependent function as .
Definition elements.h:1769
static bool Suppress_output_while_checking_for_inverted_elements
Static boolean to suppress output while checking for inverted elements.
Definition elements.h:1783
int get_node_number(Node *const &node_pt) const
Return the number of the node *node_pt if this node is in the element, else return -1;.
Definition elements.cc:3844
virtual void write_paraview_offsets(std::ofstream &file_out, const unsigned &nplot, unsigned &offset_sum) const
Return the offsets for the paraview sub-elements. Broken virtual. Needs to be implemented for each ne...
Definition elements.h:2980
virtual void write_paraview_output_offset_information(std::ofstream &file_out, const unsigned &nplot, unsigned &counter) const
Fill in the offset information for paraview plot. Broken virtual. Needs to be implemented for each ne...
Definition elements.h:2956
A Generalised Element class.
Definition elements.h:73
void set_nonhalo()
Label the element as not being a halo.
Definition elements.h:1144
virtual unsigned ndof_types() const
The number of types of degrees of freedom in this element are sub-divided into.
Definition elements.h:1189
bool is_halo() const
Is this element a halo?
Definition elements.h:1150
void assign_internal_eqn_numbers(unsigned long &global_number, Vector< double * > &Dof_pt)
Assign the global equation numbers to the internal Data. The arguments are the current highest global...
Definition elements.cc:534
Data *& internal_data_pt(const unsigned &i)
Return a pointer to i-th internal data object.
Definition elements.h:605
int non_halo_proc_ID()
ID of processor ID that holds non-halo counterpart of halo element; negative if not a halo.
Definition elements.h:1157
unsigned ninternal_data() const
Return the number of internal data objects.
Definition elements.h:810
bool must_be_kept_as_halo() const
Test whether the element must be kept as a halo element.
Definition elements.h:1176
void add_internal_value_pt_to_map(std::map< unsigned, double * > &map_of_value_pt)
Add pointers to the internal data values to map indexed by the global equation number.
Definition elements.cc:616
void set_halo(const unsigned &non_halo_proc_ID)
Label the element as halo and specify processor that holds non-halo counterpart.
Definition elements.h:1138
virtual void assign_local_eqn_numbers(const bool &store_local_dof_pt)
Setup the arrays of local equation numbers for the element. If the optional boolean argument is true,...
Definition elements.cc:696
void describe_dofs(std::ostream &out, const std::string &current_string) const
Function to describe the dofs of the element. The ostream specifies the output stream to which the de...
Definition elements.cc:556
unsigned ndim() const
Access function to # of Eulerian coordinates.
TimeStepper *& time_stepper_pt()
Access function for pointer to time stepper: Null if object is not time-dependent.
unsigned nlagrangian() const
Access function to # of Lagrangian coordinates.
Class that contains data for hanging nodes.
Definition nodes.h:742
Node *const & master_node_pt(const unsigned &i) const
Return a pointer to the i-th master node.
Definition nodes.h:791
unsigned nmaster() const
Return the number of master nodes.
Definition nodes.h:785
virtual double knot(const unsigned &i, const unsigned &j) const =0
Return local coordinate s[j] of i-th integration point.
virtual unsigned nweight() const =0
Return the number of integration points of the scheme.
void add_boundary_node(const unsigned &b, Node *const &node_pt)
Add a (pointer to) a node to the b-th boundary.
Definition mesh.cc:243
Vector< Node * > Node_pt
Vector of pointers to nodes.
Definition mesh.h:183
std::map< unsigned, Vector< Node * > > Shared_node_pt
Map of vectors holding the pointers to the shared nodes. These are all the nodes that are on two "nei...
Definition mesh.h:116
static Steady< 0 > Default_TimeStepper
Default Steady Timestepper, to be used in default arguments to Mesh constructors.
Definition mesh.h:75
bool does_pointer_correspond_to_mesh_data(double *const &parameter_pt)
Does the double pointer correspond to any mesh data.
Definition mesh.cc:2471
unsigned ndof_types() const
Return number of dof types in mesh.
Definition mesh.cc:8764
void remove_boundary_node(const unsigned &b, Node *const &node_pt)
Remove a node from the boundary b.
Definition mesh.cc:221
bool is_mesh_distributed() const
Boolean to indicate if Mesh has been distributed.
Definition mesh.h:1596
void output_external_halo_elements(std::ostream &outfile, const unsigned &n_plot=5)
Output all external halo elements.
Definition mesh.h:2166
void setup_shared_node_scheme()
Setup shared node scheme.
Definition mesh.cc:2712
Node *& external_halo_node_pt(const unsigned &p, const unsigned &j)
Access fct to the j-th external halo node in this Mesh whose non-halo external counterpart is held on...
Definition mesh.h:2385
std::map< unsigned, Vector< GeneralisedElement * > > Root_haloed_element_pt
Map of vectors holding the pointers to the root haloed elements.
Definition mesh.h:103
void add_halo_node_pt(const unsigned &p, Node *&nod_pt)
Add halo node whose non-halo counterpart is held on processor p to the storage scheme for halo nodes.
Definition mesh.h:1914
unsigned nexternal_haloed_element()
Total number of external haloed elements in this Mesh.
Definition mesh.h:2275
unsigned add_external_haloed_node_pt(const unsigned &p, Node *&nod_pt)
Add external haloed node whose halo (external) counterpart is held on processor p to the storage sche...
Definition mesh.cc:9516
unsigned nexternal_halo_node()
Total number of external halo nodes in this Mesh.
Definition mesh.h:2316
unsigned nodal_dimension() const
Return number of nodal dimension in mesh.
Definition mesh.cc:9055
bool Lookup_for_elements_next_boundary_is_setup
Flag to indicate that the lookup schemes for elements that are adjacent to the boundaries has been se...
Definition mesh.h:87
void flush_element_and_node_storage()
Flush storage for elements and nodes by emptying the vectors that store the pointers to them....
Definition mesh.h:411
std::map< unsigned, Vector< GeneralisedElement * > > External_haloed_element_pt
Map of vectors holding the pointers to the external haloed elements.
Definition mesh.h:129
void add_shared_node_pt(const unsigned &p, Node *&nod_pt)
Add shared node whose counterpart is held on processor p to the storage scheme for shared nodes....
Definition mesh.h:2141
void flush_element_storage()
Flush storage for elements (only) by emptying the vectors that store the pointers to them....
Definition mesh.h:427
FiniteElement * finite_element_pt(const unsigned &e) const
Upcast (downcast?) to FiniteElement (needed to access FiniteElement member functions).
Definition mesh.h:477
void get_haloed_node_stats(double &av_number, unsigned &max_number, unsigned &min_number)
Get haloed node stats for this distributed mesh: Average/max/min number of haloed nodes over all proc...
Definition mesh.cc:4904
void check_halo_schemes(DocInfo &doc_info, double &max_permitted_error_for_halo_check)
Check halo and shared schemes on the mesh.
Definition mesh.cc:6881
virtual void set_mesh_level_time_stepper(TimeStepper *const &time_stepper_pt, const bool &preserve_existing_data)
Function that can be used to set any additional timestepper data stored at the Mesh (as opposed to no...
Definition mesh.cc:2402
void describe_local_dofs(std::ostream &out, const std::string &current_string) const
Function to describe the local dofs of the elements. The ostream specifies the output stream to which...
Definition mesh.cc:746
virtual void classify_halo_and_haloed_nodes(DocInfo &doc_info, const bool &report_stats)
Classify the halo and haloed nodes in the mesh. Virtual so it can be overloaded to perform additional...
Definition mesh.cc:3262
void add_root_haloed_element_pt(const unsigned &p, GeneralisedElement *&el_pt)
Add root haloed element whose non-halo counterpart is held on processor p to the storage scheme for h...
Definition mesh.h:1985
void synchronise_shared_nodes(const bool &report_stats)
Synchronise shared node lookup schemes to cater for the the case where: (1) a certain node on the cur...
Definition mesh.cc:2890
void set_elemental_internal_time_stepper(TimeStepper *const &time_stepper_pt, const bool &preserve_existing_data)
Set the timestepper associated with the internal data stored within elements in the meah.
Definition mesh.cc:2535
unsigned nboundary_element(const unsigned &b) const
Return number of finite elements that are adjacent to boundary b.
Definition mesh.h:886
bool Keep_all_elements_as_halos
bool to indicate whether to keep all elements in a mesh as halos or not
Definition mesh.h:141
Node *& node_pt(const unsigned long &n)
Return pointer to global node n.
Definition mesh.h:440
std::map< unsigned, Vector< Node * > > External_haloed_node_pt
Map of vectors holding the pointers to the external haloed nodes.
Definition mesh.h:138
virtual void setup_boundary_element_info()
Interface for function that is used to setup the boundary information (Empty virtual function – imple...
Definition mesh.h:279
FiniteElement * boundary_element_pt(const unsigned &b, const unsigned &e) const
Return pointer to e-th finite element on boundary b.
Definition mesh.h:848
std::map< unsigned, Vector< GeneralisedElement * > > Root_halo_element_pt
Map of vectors holding the pointers to the root halo elements.
Definition mesh.h:100
unsigned elemental_dimension() const
Return number of elemental dimension in mesh.
Definition mesh.cc:8917
void check_inverted_elements(bool &mesh_has_inverted_elements, std::ofstream &inverted_element_file)
Check for inverted elements and report outcome in boolean variable. This visits all elements at their...
Definition mesh.cc:870
OomphCommunicator * Comm_pt
Pointer to communicator – set to NULL if mesh is not distributed.
Definition mesh.h:119
virtual void create_shared_boundaries(OomphCommunicator *comm_pt, const Vector< unsigned > &element_domain, const Vector< GeneralisedElement * > &backed_up_el_pt, const Vector< FiniteElement * > &backed_up_f_el_pt, std::map< Data *, std::set< unsigned > > &processors_associated_with_data, const bool &overrule_keep_as_halo_element_status)
Creates the shared boundaries, only used in unstructured meshes In this case with the "TriangleMesh" ...
Definition mesh.h:2498
void add_root_halo_element_pt(const unsigned &p, GeneralisedElement *&el_pt)
Add root halo element whose non-halo counterpart is held on processor p to this Mesh.
Definition mesh.h:1879
void output_fct(std::ostream &outfile, const unsigned &n_plot, FiniteElement::SteadyExactSolutionFctPt)
Output a given Vector function at f(n_plot) points in each element.
Definition mesh.cc:2199
void shift_time_values()
Shift time-dependent data along for next timestep: Deal with nodal Data/positions and the element's i...
Definition mesh.cc:2326
virtual void reorder_nodes(const bool &use_old_ordering=true)
Re-order nodes in the order in which they appear in elements – can be overloaded for more efficient r...
Definition mesh.cc:508
void prune_halo_elements_and_nodes(Vector< GeneralisedElement * > &deleted_element_pt, const bool &report_stats=false)
(Irreversibly) prune halo(ed) elements and nodes, usually after another round of refinement,...
Definition mesh.h:1675
void calculate_predictions()
Calculate predictions for all Data and positions associated with the mesh, usually used in adaptive t...
Definition mesh.cc:2366
unsigned nboundary() const
Return number of boundaries.
Definition mesh.h:835
unsigned add_external_haloed_element_pt(const unsigned &p, GeneralisedElement *&el_pt)
Add external haloed element whose non-halo counterpart is held on processor p to the storage scheme f...
Definition mesh.cc:9475
void describe_dofs(std::ostream &out, const std::string &current_string) const
Function to describe the dofs of the Mesh. The ostream specifies the output stream to which the descr...
Definition mesh.cc:711
Vector< GeneralisedElement * > halo_element_pt(const unsigned &p)
Return vector of halo elements in this Mesh whose non-halo counterpart is held on processor p.
Definition mesh.h:1748
void output_fct_paraview(std::ofstream &file_out, const unsigned &nplot, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt) const
Output in paraview format into specified file. Breaks up each element into sub-elements for plotting ...
Definition mesh.cc:1491
void remove_boundary_nodes()
Clear all pointers to boundary nodes.
Definition mesh.cc:204
bool Output_halo_elements
Bool for output of halo elements.
Definition mesh.h:2035
unsigned long nnode() const
Return number of nodes in the mesh.
Definition mesh.h:604
virtual void node_update(const bool &update_all_solid_nodes=false)
Update nodal positions in response to changes in the domain shape. Uses the FiniteElement::get_x(....
Definition mesh.cc:287
void get_efficiency_of_mesh_distribution(double &av_efficiency, double &max_efficiency, double &min_efficiency)
Get efficiency of mesh distribution: In an ideal distribution without halo overhead,...
Definition mesh.cc:6390
unsigned nroot_haloed_element()
Total number of root haloed elements in this Mesh.
Definition mesh.h:1929
void output_external_haloed_elements(std::ostream &outfile, const unsigned &n_plot=5)
Output all external haloed elements.
Definition mesh.h:2198
void delete_all_external_storage()
Wipe the storage for all externally-based elements.
Definition mesh.cc:9190
void get_all_halo_data(std::map< unsigned, double * > &map_of_halo_data)
Get all the halo data stored in the mesh and add pointers to the data to the map, indexed by global e...
Definition mesh.cc:4749
Node *& external_haloed_node_pt(const unsigned &p, const unsigned &j)
Access fct to the j-th external haloed node in this Mesh whose external halo counterpart is held on p...
Definition mesh.h:2449
unsigned nhalo_node()
Total number of halo nodes in this Mesh.
Definition mesh.h:1886
std::map< unsigned, Vector< Node * > > Halo_node_pt
Map of vectors holding the pointers to the halo nodes.
Definition mesh.h:106
unsigned check_for_repeated_nodes(const double &epsilon=1.0e-12)
Check for repeated nodes within a given spatial tolerance. Return (0/1) for (pass/fail).
Definition mesh.h:760
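A minimal sketch, assuming an existing mesh pointer mesh_pt and a user-chosen tolerance:

  double tol = 1.0e-10;
  if (mesh_pt->check_for_repeated_nodes(tol) != 0)
  {
    oomph_info << "Repeated nodes found within tolerance " << tol << std::endl;
  }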
void convert_to_boundary_node(Node *&node_pt, const Vector< FiniteElement * > &finite_element_pt)
A function that upgrades an ordinary node to a boundary node. We shouldn't ever really use this,...
Definition mesh.cc:2590
const Vector< GeneralisedElement * > & element_pt() const
Return reference to the Vector of elements.
Definition mesh.h:464
void output(std::ostream &outfile)
Output for all elements.
Definition mesh.cc:2027
void add_element_pt(GeneralisedElement *const &element_pt)
Add a (pointer to) an element to the mesh.
Definition mesh.h:625
std::map< unsigned, Vector< Node * > > Haloed_node_pt
Map of vectors holding the pointers to the haloed nodes.
Definition mesh.h:109
void assign_initial_values_impulsive()
Assign initial values for an impulsive start.
Definition mesh.cc:2288
Node * haloed_node_pt(const unsigned &p, const unsigned &j)
Access fct to the j-th haloed node in this Mesh whose halo counterpart is held on processor p.
Definition mesh.h:2022
bool Resize_halo_nodes_not_required
Set this to true to suppress resizing of halo nodes (at your own risk!)
Definition mesh.h:144
void assign_local_eqn_numbers(const bool &store_local_dof_pt)
Assign the local equation numbers in all elements. If the boolean argument is true then also store poi...
Definition mesh.cc:765
Vector< Vector< Node * > > Boundary_node_pt
Vector of Vector of pointers to nodes on the boundaries: Boundary_node_pt(b,n). Note that this is pri...
Definition mesh.h:83
virtual ~Mesh()
Virtual Destructor to clean up all memory.
Definition mesh.cc:650
void add_haloed_node_pt(const unsigned &p, Node *&nod_pt)
Add haloed node whose halo counterpart is held on processor p to the storage scheme for haloed nodes.
Definition mesh.h:2029
void get_external_halo_node_pt(Vector< Node * > &external_halo_node_pt)
Get vector of pointers to all external halo nodes.
Definition mesh.h:2331
void add_node_pt(Node *const &node_pt)
Add a (pointer to a) node to the mesh.
Definition mesh.h:619
virtual void read(std::ifstream &restart_file)
Read solution from restart file.
Definition mesh.cc:1130
void set_nodal_time_stepper(TimeStepper *const &time_stepper_pt, const bool &preserve_existing_data)
Set the timestepper associated with the nodal data in the mesh.
Definition mesh.cc:2516
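A hedged sketch of switching all nodal data in the mesh to a different timestepper while preserving the existing history values; mesh_pt is an assumption and BDF<2> is used purely as an example of a concrete TimeStepper:

  TimeStepper* time_stepper_pt = new BDF<2>;  // example timestepper (assumption)
  bool preserve_existing_data = true;         // keep the current history values
  mesh_pt->set_nodal_time_stepper(time_stepper_pt, preserve_existing_data);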
void doc_mesh_distribution(DocInfo &doc_info)
Doc the mesh distribution, to be processed with tecplot macros.
Definition mesh.cc:6470
unsigned self_test()
Self-test: Check elements and nodes. Return 0 for OK.
Definition mesh.cc:778
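A minimal sketch (mesh_pt is an assumption); a return value of 0 indicates that all checks passed:

  unsigned error_flag = mesh_pt->self_test();
  if (error_flag != 0)
  {
    oomph_info << "Mesh self-test failed" << std::endl;
  }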
void get_halo_node_stats(double &av_number, unsigned &max_number, unsigned &min_number)
Get halo node stats for this distributed mesh: Average/max/min number of halo nodes over all processo...
Definition mesh.cc:4845
void set_consistent_pinned_values_for_continuation(ContinuationStorageScheme *const &continuation_stepper_pt)
Set consistent values for pinned data in continuation.
Definition mesh.cc:2436
Vector< Node * > prune_dead_nodes()
Prune nodes. Nodes that have been marked as obsolete are removed from the mesh (and its boundary-node...
Definition mesh.cc:966
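A hedged sketch of the intended workflow: mark nodes as obsolete via Node::set_obsolete() (documented further down), then prune; mesh_pt and the node index are assumptions:

  mesh_pt->node_pt(0)->set_obsolete();                        // flag a node for removal
  Vector<Node*> pruned_node_pt = mesh_pt->prune_dead_nodes(); // remove all obsolete nodes
  oomph_info << "Pruned " << pruned_node_pt.size() << " node(s)" << std::endl;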
Vector< GeneralisedElement * > Element_pt
Vector of pointers to generalised elements.
Definition mesh.h:186
void resize_halo_nodes()
Helper function that resizes halo nodes to the same size as their non-halo counterparts if required....
Definition mesh.cc:4426
std::map< unsigned, Vector< Node * > > External_halo_node_pt
Map of vectors holding the pointers to the external halo nodes.
Definition mesh.h:135
unsigned nhaloed_node()
Total number of haloed nodes in this Mesh.
Definition mesh.h:1993
unsigned nexternal_haloed_node()
Total number of external haloed nodes in this Mesh.
Definition mesh.h:2420
Node * halo_node_pt(const unsigned &p, const unsigned &j)
Access fct to the j-th halo node in this Mesh whose non-halo counterpart is held on processor p.
Definition mesh.h:1922
static bool Suppress_warning_about_empty_mesh_level_time_stepper_function
Boolean used to control warning about empty mesh level timestepper function.
Definition mesh.h:237
unsigned long assign_global_eqn_numbers(Vector< double * > &Dof_pt)
Assign the global equation numbers in the Data stored at the nodes and also internal element Data....
Definition mesh.cc:677
virtual void distribute(OomphCommunicator *comm_pt, const Vector< unsigned > &element_domain, Vector< GeneralisedElement * > &deleted_element_pt, DocInfo &doc_info, const bool &report_stats, const bool &overrule_keep_as_halo_element_status)
Distribute the problem and doc; make this virtual to allow overloading for particular meshes where fu...
Definition mesh.cc:4959
Vector< GeneralisedElement * > haloed_element_pt(const unsigned &p)
Return vector of haloed elements in this Mesh whose haloing counterpart is held on processor p.
Definition mesh.h:1787
Node * shared_node_pt(const unsigned &p, const unsigned &j)
Access fct to the j-th shared node in this Mesh that has a counterpart on processor p.
Definition mesh.h:2118
void null_external_halo_node(const unsigned &p, Node *nod_pt)
Null out specified external halo node (used when deleting duplicates)
Definition mesh.cc:8569
unsigned nshared_node()
Total number of shared nodes in this Mesh.
Definition mesh.h:2066
void remove_null_pointers_from_external_halo_node_storage()
Consolidate external halo node storage by removing nulled-out pointers in external halo and haloed sch...
Definition mesh.cc:8589
unsigned long nelement() const
Return number of elements in the mesh.
Definition mesh.h:598
virtual void get_node_reordering(Vector< Node * > &reordering, const bool &use_old_ordering=true) const
Get a reordering of the nodes in the order in which they appear in elements – can be overloaded for m...
Definition mesh.cc:532
void merge_meshes(const Vector< Mesh * > &sub_mesh_pt)
Merge meshes. Note: This simply merges the meshes' elements and nodes (ignoring duplicates; no bounda...
Definition mesh.cc:65
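A hedged sketch, assuming two existing meshes first_mesh_pt and second_mesh_pt; note that no boundary lookup schemes are created for the combined mesh:

  Vector<Mesh*> sub_mesh_pt(2);
  sub_mesh_pt[0] = first_mesh_pt;
  sub_mesh_pt[1] = second_mesh_pt;
  Mesh* combined_mesh_pt = new Mesh;           // start from an empty mesh
  combined_mesh_pt->merge_meshes(sub_mesh_pt); // copy in elements and nodes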
void output_paraview(std::ofstream &file_out, const unsigned &nplot) const
Output in paraview format into specified file. Breaks up each element into sub-elements for plotting ...
Definition mesh.cc:1225
void output_boundaries(std::ostream &outfile)
Output the nodes on the boundaries (into separate tecplot zones)
Definition mesh.cc:1064
unsigned nexternal_halo_element()
Total number of external halo elements in this Mesh.
Definition mesh.h:2230
virtual void dump(std::ofstream &dump_file, const bool &use_old_ordering=true) const
Dump the data in the mesh into a file for restart.
Definition mesh.cc:1088
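A hedged restart sketch pairing dump() with read() (documented above); the file name and mesh_pt are assumptions:

  // Write the mesh data for restart...
  std::ofstream dump_file("mesh_restart.dat");
  mesh_pt->dump(dump_file);
  dump_file.close();

  // ...and later read it back into a compatibly constructed mesh.
  std::ifstream restart_file("mesh_restart.dat");
  mesh_pt->read(restart_file);
  restart_file.close();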
std::map< unsigned, Vector< GeneralisedElement * > > External_halo_element_pt
External halo(ed) elements are created as and when they are needed to act as source elements for the ...
Definition mesh.h:126
Node *& boundary_node_pt(const unsigned &b, const unsigned &n)
Return pointer to node n on boundary b.
Definition mesh.h:497
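A minimal sketch of visiting every boundary node; mesh_pt is an assumption and nboundary_node(b) is assumed to be the companion counting function for boundary b:

  unsigned nbound = mesh_pt->nboundary();
  for (unsigned b = 0; b < nbound; b++)
  {
    unsigned nnod = mesh_pt->nboundary_node(b);
    for (unsigned n = 0; n < nnod; n++)
    {
      Node* nod_pt = mesh_pt->boundary_node_pt(b, n);
      // ... inspect or pin nod_pt here ...
    }
  }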
unsigned nroot_halo_element()
Total number of root halo elements in this Mesh.
Definition mesh.h:1838
Nodes are derived from Data, but, in addition, have a definite (Eulerian) position in a space of a gi...
Definition nodes.h:906
void set_obsolete()
Mark node as obsolete.
Definition nodes.h:1436
void read(std::ifstream &restart_file)
Read nodal position and associated data from file for restart.
Definition nodes.cc:1996
void position(Vector< double > &pos) const
Compute Vector of nodal positions either directly or via hanging node representation.
Definition nodes.cc:2499
virtual void remove_from_boundary(const unsigned &b)
Broken interface for removing the node from the mesh boundary b. Here to provide error reporting.
Definition nodes.cc:2350
void set_non_obsolete()
Mark node as non-obsolete.
Definition nodes.h:1442
unsigned ndim() const
Return (Eulerian) spatial dimension of the node.
Definition nodes.h:1054
virtual void add_to_boundary(const unsigned &b)
Broken interface for adding the node to the mesh boundary b. Essentially here for error reporting.
Definition nodes.cc:2336
bool is_hanging() const
Test whether the node is geometrically hanging.
Definition nodes.h:1285
double & x_gen(const unsigned &k, const unsigned &i)
Reference to the generalised position x(k,i). 'Type': k; Coordinate direction: i.
Definition nodes.h:1126
HangInfo *const & hanging_pt() const
Return pointer to hanging node data (this refers to the geometric hanging node status) (const version...
Definition nodes.h:1228
An oomph-lib wrapper to the MPI_Comm communicator object. Just contains an MPI_Comm object (which is ...
std::ostream *& stream_pt()
Access function for the stream pointer.
An OomphLibError object which should be thrown when a run-time error is encountered....
A class for handling oomph-lib ...
An OomphLibWarning object which should be created as a temporary object to issue a warning....
RefineableElements are FiniteElements that may be subdivided into children to provide a better local ...
A Class for shape functions. In simple cases, the shape functions have only one index that can be tho...
Definition shape.h:76
IC problem for an elastic body discretised on a given (sub)-mesh. We switch the elements' residuals a...
SolidNode * node_pt(const unsigned long &n)
Return a pointer to the n-th global SolidNode.
Definition mesh.h:2602
void set_lagrangian_nodal_coordinates()
Make the current configuration the undeformed one by setting the nodal Lagrangian coordinates to thei...
Definition mesh.cc:9564
static SolidICProblem Solid_IC_problem
Static problem that can be used to assign initial conditions on a given solid mesh (need to define th...
Definition mesh.h:2687
A Class for nodes that deform elastically (i.e. position is an unknown in the problem)....
Definition nodes.h:1686
unsigned nlagrangian_type() const
Number of types of Lagrangian coordinates used to interpolate the Lagrangian coordinates within the e...
Definition nodes.h:1877
double & xi_gen(const unsigned &k, const unsigned &i)
Reference to the generalised Lagrangian position. 'Type': k; Coordinate direction: i.
Definition nodes.h:1902
unsigned nlagrangian() const
Return number of Lagrangian coordinates.
Definition nodes.h:1870
TAdvectionDiffusionReactionElement<NREAGENT,DIM,NNODE_1D> elements are isoparametric triangular DIM-d...
void output(std::ostream &outfile)
Output function: x,y,u or x,y,z,u.
void output_fct(std::ostream &outfile, const unsigned &n_plot, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt)
Output function for an exact solution: x,y,u_exact.
Base class for time-stepping schemes. Timestepper provides an approximation of the temporal derivativ...
virtual void shift_time_values(Data *const &data_pt)=0
This function advances the Data's time history so that we can move on to the next timestep.
virtual void calculate_predicted_values(Data *const &data_pt)
Do the predictor step for data stored in a Data object (currently empty – overwrite for specific sche...
virtual void assign_initial_values_impulsive(Data *const &data_pt)=0
Initialise the time-history for the Data values corresponding to an impulsive start.
Base class for tree-based refineable meshes.
A generalised tree base class that abstracts the common functionality between the quad- and octrees u...
Definition tree.h:74
RefineableElement * object_pt() const
Return the pointer to the object (RefineableElement) represented by the tree.
Definition tree.h:88
void flush_object()
Flush the object represented by the tree.
Definition tree.h:94
Base class for triangle meshes (meshes made of 2D triangle elements). Note: we choose to template Tri...
A slight extension to the standard template vector class so that we can include "graceful" array rang...
Definition Vector.h:58
bool Doc_comprehensive_timings
Global boolean to switch on comprehensive timing – can probably be declared const false when developm...
bool node_global_position_comparison(Node *nd1_pt, Node *nd2_pt)
Function for ordering nodes. Return true if the first node's position is "before" the second node's....
Definition mesh.h:2926
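A hedged sketch of using this comparison function to sort node pointers by global position; node_vector is an assumption and any namespace qualification is omitted:

  Vector<Node*> node_vector;
  // ... fill node_vector with pointers to the nodes of interest ...
  std::sort(node_vector.begin(), node_vector.end(),
            node_global_position_comparison);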
void write_pvd_footer(std::ofstream &pvd_file)
Write the pvd file footer.
Definition mesh.cc:9639
void write_pvd_header(std::ofstream &pvd_file)
Write the pvd file header.
Definition mesh.cc:9615
void write_pvd_information(std::ofstream &pvd_file, const std::string &output_filename, const double &time)
Add name of output file and associated continuous time to pvd file.
Definition mesh.cc:9624
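A hedged sketch of the pvd/vtu workflow for time-dependent Paraview output, combining output_paraview() (documented above) with the three pvd helpers; the file names, the number of plot points, the time value and mesh_pt are assumptions, and any enclosing namespace qualification of the helpers is omitted:

  std::ofstream pvd_file("soln.pvd");
  write_pvd_header(pvd_file);

  // For each timestep: write a vtu snapshot and register it in the pvd file.
  std::ofstream vtu_file("soln0.vtu");
  mesh_pt->output_paraview(vtu_file, 5);
  vtu_file.close();
  write_pvd_information(pvd_file, "soln0.vtu", 0.0);

  // Once all timesteps have been written:
  write_pvd_footer(pvd_file);
  pvd_file.close();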
double timer()
Returns the time in seconds after some point in the past.
OomphInfo oomph_info
Single (global) instantiation of the OomphInfo object – this is used throughout the library as a "rep...