GetFEM 5.4.2
getfem_assembling_tensors.cc
1 /*===========================================================================
2 
3  Copyright (C) 2003-2020 Julien Pommier
4 
5  This file is a part of GetFEM
6 
7  GetFEM is free software; you can redistribute it and/or modify it
8  under the terms of the GNU Lesser General Public License as published
9  by the Free Software Foundation; either version 3 of the License, or
10  (at your option) any later version along with the GCC Runtime Library
11  Exception either version 3.1 or (at your option) any later version.
12  This program is distributed in the hope that it will be useful, but
13  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
15  License and GCC Runtime Library Exception for more details.
16  You should have received a copy of the GNU Lesser General Public License
17  along with this program; if not, write to the Free Software Foundation,
18  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19 
20 ===========================================================================*/
21 
22 #include "gmm/gmm_blas_interface.h"
23 #include "getfem/getfem_arch_config.h"
24 #include "getfem/getfem_assembling_tensors.h"
25 #include "getfem/getfem_locale.h"
26 #include "getfem/getfem_mat_elem.h"
27 
28 namespace getfem {
29  size_type vdim_specif_list::nb_mf() const {
30  return std::count_if(begin(),end(),
31  std::mem_fun_ref(&vdim_specif::is_mf_ref));
32  }
33  size_type vdim_specif_list::nbelt() const {
34  size_type sz = 1;
35  for (const_iterator it = begin(); it != end(); ++it) sz *= (*it).dim;
36  return sz;
37  }
38  void vdim_specif_list::build_strides_for_cv
39  (size_type cv, tensor_ranges& r, std::vector<tensor_strides >& str) const {
40  stride_type s = 1, cnt = 0;
41  str.resize(size());
42  r.resize(size());
43  for (const_iterator it = begin(); it != end(); ++it, ++cnt) {
44  if ((*it).is_mf_ref()) {
45  r[cnt] = unsigned((*it).pmf->nb_basic_dof_of_element(cv));
46  //mesh_fem::ind_dof_ct::const_iterator ii
47  // = (*it).pmf->ind_basic_dof_of_element(cv).begin();
48  str[cnt].resize(r[cnt]);
49  for (index_type j=0; j < r[cnt]; ++j) {
50  str[cnt][j] = int((*it).pmf->ind_basic_dof_of_element(cv)[j]*s);
51  }
52  } else {
53  r[cnt] = int((*it).dim);
54  str[cnt].resize(r[cnt]);
55  for (index_type j=0; j < (*it).dim; ++j) {
56  str[cnt][j] = j*s;
57  }
58  }
59  s *= stride_type((*it).dim);
60  }
61  }
62 
63  void ATN::update_childs_required_shape() {
64  for (dim_type i=0; i < nchilds(); ++i) {
65  child(i).merge_required_shape(tensor_shape(child(i).ranges()));
66  }
67  }
68  void ATN::set_number(unsigned &gcnt) {
69  if (number_ == unsigned(-1)) {
70  for (unsigned i=0; i < nchilds(); ++i) child(i).set_number(gcnt);
71  number_ = ++gcnt;
72  }
73  }
74 
75  bool ATN::is_zero_size() {
76  return child(0).is_zero_size();
77  }
78 
79  /*
80  general class for tensors which store their own data
81  */
82  class ATN_tensor_w_data : public ATN_tensor {
83  TDIter data_base;
84  protected:
85  std::vector<scalar_type> data;
86  void reinit_();
87  void reinit0()
88  { ATN_tensor_w_data::reinit_(); std::fill(data.begin(), data.end(),0); }
89  };
90 
91  /* note that the data is NOT filled with zeros */
92  void ATN_tensor_w_data::reinit_() {
93  tr.assign_shape(req_shape);
94  tr.init_strides();
95  if (tr.card() > 10000000) {
96  cerr << "warning, a tensor of size " << tr.card()
97  << " will be created, it needs "
98  << tr.card()*sizeof(scalar_type) << " bytes of memory\n";
99  }
100  if (tr.card() == 0) {
101  cerr << "WARNING: tensor " << name()
102  << " will be created with a size of "
103  << ranges() << " and 0 non-null elements!" << endl;
104  }
105  data.resize(tr.card());
106  data_base = &data[0];
107  tr.set_base(data_base);
108  }
109 
110 
111  /*
112  general class for the computation of a reduced product of tensors
113  (templated by the number of product tensors)
114 
115  should be very efficient.
116  */
117  typedef std::vector<std::pair<ATN_tensor*,std::string> >
118  reduced_tensor_arg_type;
119 
120  class ATN_reduced_tensor : public ATN_tensor_w_data {
121  /* used for specification of tensors and reduction indices, see below */
122  reduced_tensor_arg_type red;
123  bgeot::tensor_reduction tred;
124  public:
125  void check_shape_update(size_type , dim_type) {
126  shape_updated_ = false;
127  for (dim_type i=0; i < nchilds(); ++i) {
128  if (child(i).is_shape_updated()) {
129  shape_updated_ = true;
130  }
131  }
132  if (shape_updated_) {
133  r_.resize(0);
134  for (dim_type i=0; i < nchilds(); ++i) {
135  std::string s = red_n(i);
136  if (s.size() != child(i).ranges().size()) {
137  ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
138  << int(i+1)
139  << "th argument of the reduction "
140  << name()
141  << " (ranges=" << child(i).ranges() << ")");
142  }
143  for (size_type j=0; j < s.length(); ++j) {
144  if (s[j] == ' ') r_.push_back(child(i).ranges()[j]);
145  }
146  }
147  }
148  }
149  void update_childs_required_shape() {
150  /* could be improved, see the comments of the function
151  tensor_reduction::required_shape */
152  for (dim_type n=0; n < nchilds(); ++n) {
153  tensor_shape ts(child(n).ranges());
154  tensor_ranges rn(child(n).ranges());
155  const std::string &s = red[n].second;
156  GMM_ASSERT1(rn.size() == s.size(), "Wrong size !");
157  for (unsigned i=0; i < rn.size(); ++i) {
158  if (s[i] != ' ') {
159  size_type p = s.find(s[i]);
160  if (p != size_type(-1) && p < i && rn[p] != rn[i])
161  ASM_THROW_TENSOR_ERROR("can't reduce the diagonal of a tensor "
162  "of size " << rn << " with '"
163  << s << "'");
164  }
165  }
166  bgeot::tensor_reduction::diag_shape(ts, red[n].second);
167  child(n).merge_required_shape(ts);
168  }
169  }
170 
171  /*
172  r is a container of pair<ATN_tensor*, std::string>
173  where the strings specify the reduction indices:
174 
175  if a_{ik}b_{kjl} is reduced against k and l, then the strings are
176  " k" and "k l"
177  */
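  /* Illustrative sketch (not part of the original code; `a' and `b' are
     hypothetical ATN_tensor instances): to reduce a_{ik} b_{kjl} over
     k and l, the argument list would be built as

       reduced_tensor_arg_type args;
       args.push_back(std::make_pair(&a, std::string(" k")));  // keep i, reduce k
       args.push_back(std::make_pair(&b, std::string("k l"))); // keep j, reduce k and l
       ATN_reduced_tensor prod(args);  // result is indexed by (i,j)

     a blank character marks an index that survives in the result. */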
178  ATN_reduced_tensor(reduced_tensor_arg_type& r) : red(r) {
179  for (size_type i=0; i < r.size(); ++i) add_child(*red[i].first);
180  }
181 
182  std::string red_n(size_type n) {
183  std::string s = red[n].second;
184  if (s.length() == 0)
185  s.append(red[n].first->ranges().size(), ' ');
186  return s;
187  }
188 
189  private:
190  void reinit_() {
191  tred.clear();
192  for (dim_type i=0; i < red.size(); ++i) {
193  // cerr << "ATN_reduced_tensor::reinit : insertion of r(" << red_n(i)
194  // << "), tr[" << red[i].first->ranges() << "\n"
195  // << red[i].first->tensor() << endl;*/
196  // if (red[i].first->ranges().size() != red_n(i).length()) {
197  // ASM_THROW_TENSOR_ERROR("wrong number of indexes for the "
198  // << int(i+1)
199  // << "th argument of the reduction " << name()
200  // << " (ranges=" << red[i].first->ranges()
201  // << ")");
202  // }
203  tred.insert(red[i].first->tensor(), red_n(i));
204  }
205  /* reserve the memory for the output
206  the memory is set to zero since the reduction may only affect a
207  subset of this tensor hence a part of it would not be initialized
208  */
209  ATN_tensor_w_data::reinit0();
210  /* we provide our own tensor to store the results */
211  tred.prepare(&tensor());
212  }
213 
214  void exec_(size_type , dim_type ) {
215  std::fill(data.begin(), data.end(), 0.); /* do_reduction cannot do */
216  /* this itself, since it is not the owner of the output */
217  /* tensor. */
218  tred.do_reduction();
219  }
220  };
221 
222 
223  /* slice tensor:
224  no temporary storage is needed for the slice, since direct access
225  can be provided via strides.
226  */
227  class ATN_sliced_tensor : public ATN_tensor {
228  dim_type slice_dim;
229  size_type slice_idx;
230  public:
231  ATN_sliced_tensor(ATN_tensor& a, dim_type slice_dim_,
232  size_type slice_idx_) :
233  slice_dim(slice_dim_), slice_idx(slice_idx_) { add_child(a); }
234  void check_shape_update(size_type , dim_type) {
235  if ((shape_updated_ = child(0).is_shape_updated())) {
236  if (slice_dim >= child(0).ranges().size() ||
237  slice_idx >= child(0).ranges()[slice_dim]) {
238  ASM_THROW_TENSOR_ERROR("can't slice tensor " << child(0).ranges()
239  << " at index " << int(slice_idx)
240  << " of dimension " << int(slice_dim));
241  }
242  r_ = child(0).ranges(); r_.erase(r_.begin()+slice_dim);
243  }
244  }
245  void update_childs_required_shape() {
246  tensor_shape ts = req_shape;
247  ts.set_ndim_noclean(dim_type(ts.ndim()+1));
248  ts.shift_dim_num_ge(slice_dim,+1);
249  ts.push_mask(tensor_mask(child(0).ranges()[slice_dim],
250  tensor_mask::Slice(slice_dim, index_type(slice_idx))));
251  child(0).merge_required_shape(ts);
252  }
253  private:
254  void reinit_() {
255  tensor() = tensor_ref(child(0).tensor(),
256  tensor_mask::Slice(slice_dim, index_type(slice_idx)));
257  }
258  void exec_(size_type, dim_type) {}
259  };
260 
261  /* tensor with reordered indices:
262  t{i,j,k} -> t{j,i,k}
263  reorder =  0 1 2  ->  1 0 2
264  */
265  class ATN_permuted_tensor : public ATN_tensor {
266  std::vector<dim_type> reorder;
267  public:
268  /* warning: we do not check that reorder is actually a permutation */
269  ATN_permuted_tensor(ATN_tensor& a, const std::vector<dim_type>& reorder_) :
270  reorder(reorder_) { add_child(a); }
271  private:
272  void check_shape_update(size_type , dim_type) {
273  if ((shape_updated_ = child(0).is_shape_updated())) {
274  if (reorder.size() != child(0).ranges().size())
275  ASM_THROW_TENSOR_ERROR("can't reorder tensor '" << name()
276  << "' of dimensions " << child(0).ranges()
277  << " with this permutation: " << vref(reorder));
278  r_.resize(reorder.size());
279  std::fill(r_.begin(), r_.end(), dim_type(-1));
280 
281  /*
282  --- TODO: TO BE CHECKED !!!!! ---
283  */
284  for (size_type i=0; i < reorder.size(); ++i)
285  r_[i] = child(0).ranges()[reorder[i]];
286  }
287  }
288  void update_childs_required_shape() {
289  tensor_shape ts = req_shape;
290  ts.permute(reorder, true);
291  child(0).merge_required_shape(ts);
292  }
293  void reinit_() {
294  tensor() = child(0).tensor();
295  tensor().permute(reorder);
296  }
297  void exec_(size_type, dim_type) {}
298  };
299 
300  /* diagonal tensor: take the "diagonal" of a tensor
301  (i.e. diag(t(i,j,k), {i,k}) == t(i,j,i))
302 
303  /!\ the number of dimensions DOES NOT change
304  */
305  class ATN_diagonal_tensor : public ATN_tensor {
306  dim_type i1, i2;
307  public:
308  ATN_diagonal_tensor(ATN_tensor& a, dim_type i1_, dim_type i2_) :
309  i1(i1_), i2(i2_) { add_child(a); }
310  private:
311  void check_shape_update(size_type , dim_type) {
312  if ((shape_updated_ = child(0).is_shape_updated())) {
313  if (i1 >= child(0).ranges().size() || i2 >= child(0).ranges().size() ||
314  i1 == i2 || child(0).ranges()[i1] != child(0).ranges()[i2])
315  ASM_THROW_TENSOR_ERROR("can't take the diagonal of a tensor of "
316  "sizes " << child(0).ranges() <<
317  " at indexes " << int(i1) << " and "
318  << int(i2));
319  r_ = child(0).ranges();
320  }
321  }
322  void update_childs_required_shape() {
323  tensor_shape ts = req_shape.diag_shape(tensor_mask::Diagonal(i1,i2));
324  child(0).merge_required_shape(ts);
325  }
326  void reinit_() {
327  tensor() = tensor_ref(child(0).tensor(), tensor_mask::Diagonal(i1,i2));
328  }
329  void exec_(size_type, dim_type) {}
330  };
331 
332  /* called (if possible, i.e. if not an exact integration) for each
333  integration point during mat_elem->compute() */
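  /* Note (added for clarity): at each integration point the callback below
     accumulates  t <- t + c * R,  where R is the inline-reduced elementary
     tensor produced by red.do_reduction() and c is the coefficient passed
     by mat_elem->compute() (presumably quadrature weight times Jacobian);
     the daxpy_ call performs exactly this scaled accumulation. */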
334  struct computed_tensor_integration_callback
335  : public mat_elem_integration_callback {
336  bgeot::tensor_reduction red;
337  bool was_called;
338  std::vector<TDIter> tensor_bases; /* each tref of 'red' has a */
339  /* reference into this vector */
340  virtual void exec(bgeot::base_tensor &t, bool first, scalar_type c) {
341  if (first) {
342  resize_t(t);
343  std::fill(t.begin(), t.end(), 0.);
344  was_called = true;
345  }
346  assert(t.size());
347  for (unsigned k=0; k!=eltm.size(); ++k) { /* put in the 'if (first)' ? */
348  tensor_bases[k] = const_cast<TDIter>(&(*eltm[k]->begin()));
349  }
350  red.do_reduction();
351  BLAS_INT one = BLAS_INT(1), n = BLAS_INT(red.out_data.size());
352  assert(n);
353  gmm::daxpy_(&n, &c, const_cast<double*>(&(red.out_data[0])),
354  &one, (double*)&(t[0]), &one);
355  }
356  void resize_t(bgeot::base_tensor &t) {
357  bgeot::multi_index r;
358  if (red.reduced_range.size())
359  r.assign(red.reduced_range.begin(), red.reduced_range.end());
360  else { r.resize(1); r[0]=1; }
361  t.adjust_sizes(r);
362  }
363  };
364 
365  /*
366  ATN_computed_tensor, the largest of all
367 
368  This object has become quite complex. It is the glue with the
369  mat_elem_*. It is able to perform an inline reduction (i.e. a
370  reduction applied during the mat_elem->compute()) when it is
371  allowed (i.e. no exact integration), or do the same reduction
372  after the mat_elem->compute().
373  The reduction may also involve other ATN_tensors.
374  */
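  /* Example (illustrative, assuming the usual generic assembly syntax): an
     instruction such as
         "M(#1,#1) += comp(vGrad(#1)(:,i,j).vGrad(#1)(:,i,j))"
     carries reduction indices inside comp(); with a numerical integration
     method the reduction is performed at each integration point through
     computed_tensor_integration_callback, while with exact integration the
     full elementary tensor is built first and reduced afterwards
     (see do_post_reduction below). */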
375 
376  struct mf_comp_vect;
377 
378  struct mf_comp {
379  pnonlinear_elem_term nlt;
380  const mesh_fem* pmf; /* always defined except for op_type NORMAL, GRADGT, GRADGTINV and DATA */
381  mf_comp_vect *owner;
382 
383  ATN_tensor *data;
384  std::vector<const mesh_fem*> auxmf; /* used only by nonlinear terms */
385  typedef enum { BASE=1, GRAD=2, HESS=3, NORMAL=4, GRADGT=5, GRADGTINV=6,
386  NONLIN=7, DATA=8 } op_type;
387  typedef enum { PLAIN_SHAPE = 0, VECTORIZED_SHAPE = 1,
388  MATRIXIZED_SHAPE = 2 } field_shape_type;
389  op_type op; /* the numerical value indicates the number
390  of dimensions in the tensor */
391  field_shape_type vshape; /* VECTORIZED_SHAPE if vectorization was required
392  (adds an additional dimension to the tensor,
393  which represents the component number).
394  MATRIXIZED_SHAPE for "matricization" of the
395  field.
396  */
397  std::string reduction;
398  /*
399  vectorization of non-vector FEM:
400 
401  phi1 0 0
402  0 phi1 0
403  0 0 phi1
404  phi2 0 0
405  0 phi2 0
406  0 0 phi2
407  ...
408  */
409  mf_comp(mf_comp_vect *ow, const mesh_fem* pmf_, op_type op_,
410  field_shape_type fst) :
411  nlt(0), pmf(pmf_), owner(ow), data(0), op(op_), vshape(fst) { }
412  mf_comp(mf_comp_vect *ow, const std::vector<const mesh_fem*> vmf,
413  pnonlinear_elem_term nlt_) :
414  nlt(nlt_), pmf(vmf[0]), owner(ow), data(0),
415  auxmf(vmf.begin()+1, vmf.end()), op(NONLIN),
416  vshape(PLAIN_SHAPE) { }
417  mf_comp(mf_comp_vect *ow, ATN_tensor *t) :
418  nlt(0), pmf(0), owner(ow), data(t), op(DATA), vshape(PLAIN_SHAPE) {}
419  void push_back_dimensions(size_type cv, tensor_ranges &rng,
420  bool only_reduced=false) const;
421  bool reduced(unsigned i) const {
422  if (i >= reduction.size()) return false;
423  else return reduction[i] != ' ';
424  }
425  };
426 
427  struct mf_comp_vect : public std::vector<mf_comp> {
428  const mesh_im *main_im;
429  public:
430  mf_comp_vect() : std::vector<mf_comp>(), main_im(0) { }
431  mf_comp_vect(const mf_comp_vect &other)
432  : std::vector<mf_comp>(other), main_im(other.main_im) {
433  for (size_type i=0; i < size(); ++i) (*this)[i].owner = this;
434  }
435  void set_im(const mesh_im &mim) { main_im = &mim; }
436  const mesh_im& get_im() const { return *main_im; }
437  private:
438  mf_comp_vect& operator=(const mf_comp_vect &other);
439  };
440 
441  void mf_comp::push_back_dimensions(size_type cv, tensor_ranges &rng,
442  bool only_reduced) const {
443  switch (op) {
444  case NONLIN:
445  {
446  const bgeot::multi_index &sizes = nlt->sizes(cv);
447  for (unsigned j=0; j < sizes.size(); ++j)
448  if (!only_reduced || !reduced(j))
449  rng.push_back(short_type(sizes[j]));
450  }
451  break;
452  case DATA:
453  for (unsigned i=0; i < data->ranges().size(); ++i)
454  if (!only_reduced || !reduced(i))
455  rng.push_back(data->ranges()[i]);
456  break;
457  case NORMAL:
458  assert(pmf==0);
459  assert(&owner->get_im());
460  assert(owner->get_im().linked_mesh().dim() != dim_type(-1));
461  rng.push_back(owner->get_im().linked_mesh().dim());
462  break;
463  case GRADGT:
464  case GRADGTINV:
465  assert(pmf==0);
466  assert(&owner->get_im());
467  rng.push_back(owner->get_im().linked_mesh().dim()); // N
468  rng.push_back(owner->get_im().linked_mesh().structure_of_convex(cv)->dim()); // P
469  break;
470  default:
471  unsigned d = 0;
472  if (!only_reduced || !reduced(d))
473  rng.push_back(unsigned(pmf->nb_basic_dof_of_element(cv)));
474  ++d;
475  if (vshape == mf_comp::VECTORIZED_SHAPE) {
476  if (!only_reduced || !reduced(d)) rng.push_back(pmf->get_qdim());
477  ++d;
478  }
479  if (vshape == mf_comp::MATRIXIZED_SHAPE) {
480  if (!only_reduced || !reduced(d)) {
481  GMM_ASSERT1(pmf->get_qdims().size() == 2, "Non matrix field");
482  rng.push_back(dim_type(pmf->get_qdims()[0]));
483  }
484  ++d;
485  if (!only_reduced || !reduced(d)) rng.push_back(dim_type(pmf->get_qdims()[1]));
486  ++d;
487  }
488 
489  if (op == GRAD || op == HESS) {
490  if (!only_reduced || !reduced(d))
491  rng.push_back(pmf->linked_mesh().dim());
492  ++d;
493  }
494  if (op == HESS) {
495  if (!only_reduced || !reduced(d))
496  rng.push_back(pmf->linked_mesh().dim());
497  ++d;
498  }
499  break;
500  }
501  }
502 
503 
504  class ATN_computed_tensor : public ATN_tensor {
505  mf_comp_vect mfcomp;
506  mat_elem_pool mep;
507  pmat_elem_computation pmec;
508  pmat_elem_type pme;
509  pintegration_method pim;
510  bgeot::pgeometric_trans pgt;
511  base_tensor t;
512  std::vector<scalar_type> data;
513  TDIter data_base;
514  stride_type tsize;
515  dal::bit_vector req_bv; /* bit_vector of values the mat_elem has to compute
516  (useful when only a subset is required from the
517  possibly very large elementary tensor) */
518  bool has_inline_reduction; /* true if used with reductions inside the comp, for example:
519  "comp(Grad(#1)(:,i).Grad(#2)(:,i))" */
520  computed_tensor_integration_callback icb; /* callback for inline reductions */
521 
522  /* if an inline reduction is to be done but was not possible (i.e. if exact
523  integration was used) then a fallback is used: apply the reduction
524  afterward, on the large expanded tensor */
525  bgeot::tensor_reduction fallback_red;
526  bool fallback_red_uptodate;
527  TDIter fallback_base;
528 
529  size_type cv_shape_update;
530  //mat_elem_inline_reduction inline_red;
531  public:
532  ATN_computed_tensor(const mf_comp_vect &mfcomp_) :
533  mfcomp(mfcomp_), pmec(0), pme(0), pim(0), pgt(0), data_base(0) {
534  has_inline_reduction = false;
535  bool in_data = false;
536  for (size_type i=0; i < mfcomp.size(); ++i) {
537  if (mfcomp[i].reduction.size() || mfcomp[i].op == mf_comp::DATA) {
538  has_inline_reduction = true;
539  if (mfcomp[i].op == mf_comp::DATA) { add_child(*mfcomp[i].data); in_data = true; }
540  }
541  if (mfcomp[i].op != mf_comp::DATA && in_data) {
542  /* constraint of fallback 'do_post_reduction' */
543  ASM_THROW_ERROR("data tensors inside comp() cannot be intermixed with Grad() and Base() etc., they must appear LAST");
544  }
545  }
546  }
547 
548  private:
549  /* mostly for non-linear terms, such as a 3x3x3x3 tensor which may have
550  many symmetries or many null elements. In that case, it is preferable
551  for getfem_mat_elem to handle only a sufficient subset of the tensor,
552  and build back the full tensor via adequate strides and masks */
553 
554  /* append a dimension (full) to tref */
555  stride_type add_dim(const tensor_ranges& rng, dim_type d, stride_type s, tensor_ref &tref) {
556  assert(d < rng.size());
557  tensor_strides v;
558  index_type r = rng[d];
559  tensor_mask m; m.set_full(d, r);
560  v.resize(r);
561  for (index_type i=0; i < r; ++i) v[i] = s*i;
562  assert(tref.masks().size() == tref.strides().size());
563  tref.set_ndim_noclean(dim_type(tref.ndim()+1));
564  tref.push_mask(m);
565  tref.strides().push_back(v);
566  return s*r;
567  }
568 
569  /* append a vectorized dimension to tref -- works also for cases
570  where target_dim > 1
571  */
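  /* Worked example (added for illustration): for a scalar P1 fem with 3 dof
     (target_dim == 1) used in a mesh_fem of qdim == 2, one has r = 6, q = 2
     and qmult = 2, and the vectorized base described below has the sparse
     structure

         phi1   .
          .    phi1
         phi2   .
          .    phi2
         phi3   .
          .    phi3

     i.e. dof i contributes to component i%2 with the value of phi_{i/2+1}. */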
572  stride_type add_vdim(const tensor_ranges& rng, dim_type d,
573  index_type target_dim, stride_type s,
574  tensor_ref &tref) {
575  assert(d < rng.size()-1);
576  index_type r = rng[d], q=rng[d+1];
577  index_type qmult = q/target_dim;
578  assert(r%qmult == 0); assert(q%qmult==0);
579 
580  tensor_strides v;
581  tensor_ranges trng(2); trng[0] = q; trng[1] = r;
582  index_set ti(2); ti[0] = dim_type(d+1); ti[1] = d;
583  tensor_mask m(trng,ti);
584  v.resize(r*target_dim);
585  tensor_ranges cnt(2);
586  for (index_type i=0; i < r; ++i) {
587  // the value in cnt[1] is not directly used as the loop variable
588  // as this makes the INTEL 2019 compiler wrongly optimize the loop check,
589  // making the outer loop run one iteration more than it should,
590  // creating SEH exceptions
591  cnt[1] = i;
592  for (index_type k=0; k < target_dim; ++k) {
593  cnt[0] = k*qmult + (cnt[1]%qmult); //(cnt[1] % qmult)*target_dim + k;
594  m.set_mask_val(m.lpos(cnt), true);
595  v[cnt[1]*target_dim+k] = s*( k * r/qmult + cnt[1]/qmult); //s*((cnt[1]/qmult)*target_dim + k);
596  }
597  }
598  assert(tref.masks().size() == tref.strides().size());
599  tref.set_ndim_noclean(dim_type(tref.ndim()+2));
600  tref.push_mask(m);
601  tref.strides().push_back(v);
602  return s*(r/qmult)*target_dim;
603  }
604 
605  /* append a matrixized dimension to tref -- works also for cases
606  where target_dim > 1 (in that case the rows are "vectorized")
607 
608  for example, the Base(RT0) in 2D (3 dof, target_dim=2) is:
609  [0 1 2;
610  3 4 5]
611 
612 
613  if we set it in a mesh_fem of qdim = 3x2 , we produce the sparse
614  elementary tensor 9x3x2 =
615 
616  x x x y y y
617  0 . . 3 . . <- phi1
618  . 0 . . 3 . <- phi2
619  . . 0 . . 3 <- phi3
620  1 . . 4 . . <- phi4
621  . 1 . . 4 . <- phi5
622  . . 1 . . 4 <- phi6
623  2 . . 5 . . <- phi7
624  . 2 . . 5 . <- phi8
625  . . 2 . . 5 <- phi9
626 
627  */
628  stride_type add_mdim(const tensor_ranges& rng, dim_type d,
629  index_type target_dim, stride_type s, tensor_ref &tref) {
630  assert(d < rng.size()-2);
631 
632  /* r = nb_dof, q = nb_rows, p = nb_cols */
633  index_type r = rng[d], q=rng[d+1], p=rng[d+2];
634  index_type qmult = (q*p)/target_dim;
635 
636  assert(r % q == 0);
637  assert(p % target_dim == 0);
638  assert(r % (p/target_dim) == 0);
639 
640  tensor_strides v;
641  tensor_ranges trng(3); trng[0] = q; trng[1] = p; trng[2] = r;
642  index_set ti(3); ti[0] = dim_type(d+1); ti[1] = dim_type(d+2); ti[2] = d;
643  tensor_mask m(trng,ti);
644  v.resize(r*target_dim);
645  tensor_ranges cnt(3);
646  for (cnt[2]=0; cnt[2] < r; cnt[2]++) { // loop over virtual dof number
647  for (index_type k=0; k < target_dim; ++k) {
648  unsigned pos = (cnt[2]*target_dim+k) % (q*p);
649  //unsigned ii = (pos%q), jj = (pos/q);
650  unsigned ii = (pos/p), jj = (pos%p);
651  cnt[0] = ii; cnt[1] = jj;
652  //cerr << " set_mask_val(lpos(" << cnt[0] << "," << cnt[1] << "," << cnt[2] << ") = " << m.lpos(cnt) << ")\n";
653  m.set_mask_val(m.lpos(cnt), true);
654  v[cnt[2]*target_dim+k] = s*(k * r/qmult + cnt[2]/qmult); //s*((cnt[2]/qmult)*target_dim + k);
655  }
656  }
657  assert(tref.masks().size() == tref.strides().size());
658  tref.set_ndim_noclean(dim_type(tref.ndim()+3));
659  tref.push_mask(m);
660  // cerr << "rng = " << rng << "\nr=" << r << ", q=" << q << ", p="
661  // << p << ", qmult =" << qmult << ", target_dim= " << target_dim
662  // << "\n" << "m = " << m << "\nv=" << v << "\n";
663  tref.strides().push_back(v);
664  return s*(r/qmult)*target_dim;
665  }
666 
667 
668  /* called when the FEM has changed */
669  void update_pmat_elem(size_type cv) {
670  pme = 0;
671  for (size_type i=0; i < mfcomp.size(); ++i) {
672  if (mfcomp[i].op == mf_comp::DATA) continue;
673  pfem fem = (mfcomp[i].pmf ? mfcomp[i].pmf->fem_of_element(cv)
674  : NULL);
675  pmat_elem_type pme2 = 0;
676  switch (mfcomp[i].op) {
677  case mf_comp::BASE: pme2 = mat_elem_base(fem); break;
678  case mf_comp::GRAD: pme2 = mat_elem_grad(fem); break;
679  case mf_comp::HESS: pme2 = mat_elem_hessian(fem); break;
680  case mf_comp::NORMAL: pme2 = mat_elem_unit_normal(); break;
681  case mf_comp::GRADGT:
682  case mf_comp::GRADGTINV:
683  pme2 = mat_elem_grad_geotrans(mfcomp[i].op == mf_comp::GRADGTINV);
684  break;
685  case mf_comp::NONLIN: {
686  std::vector<pfem> ftab(1+mfcomp[i].auxmf.size());
687  ftab[0] = fem;
688  for (unsigned k=0; k < mfcomp[i].auxmf.size(); ++k)
689  ftab[k+1] = mfcomp[i].auxmf[k]->fem_of_element(cv);
690  pme2 = mat_elem_nonlinear(mfcomp[i].nlt, ftab);
691  } break;
692  case mf_comp::DATA: /*ignore*/;
693  }
694  if (pme == 0) pme = pme2;
695  else pme = mat_elem_product(pme, pme2);
696  }
697 
698  if (pme == 0) pme = mat_elem_empty();
699  //ASM_THROW_ERROR("no Base() or Grad() or etc!");
700  }
701 
702 
703 
704  size_type
705  push_back_mfcomp_dimensions(size_type cv, const mf_comp& mc,
706  unsigned &d, const bgeot::tensor_ranges &rng,
707  bgeot::tensor_ref &tref, size_type tsz=1) {
708  if (mc.op == mf_comp::NONLIN) {
709  for (size_type j=0; j < mc.nlt->sizes(cv).size(); ++j)
710  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
711  } else if (mc.op == mf_comp::DATA) {
712  assert(tsz == 1);
713  tref = mc.data->tensor();
714  tsz *= tref.card();
715  d += tref.ndim();
716  } else if (mc.op == mf_comp::NORMAL) {
717  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
718  } else if (mc.op == mf_comp::GRADGT ||
719  mc.op == mf_comp::GRADGTINV) {
720  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
721  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
722  } else {
723  size_type target_dim = mc.pmf->fem_of_element(cv)->target_dim();
724  size_type qdim = mc.pmf->get_qdim();
725 
726  //cerr << "target_dim = " << target_dim << ", qdim = " << qdim << ", vectorize=" << mc.vectorize << ", rng=" << rng << " d=" << d << ", tsz=" << tsz << "\n";
727  if (mc.vshape == mf_comp::VECTORIZED_SHAPE) {
728  if (target_dim == qdim) {
729  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
730  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
731  } else {
732  tsz = add_vdim(rng,dim_type(d),index_type(target_dim),
733  stride_type(tsz), tref);
734  d += 2;
735  }
736  } else if (mc.vshape == mf_comp::MATRIXIZED_SHAPE) {
737  if (target_dim == qdim) {
738  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
739  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
740  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
741  } else {
742  tsz = add_mdim(rng, dim_type(d), index_type(target_dim),
743  stride_type(tsz), tref);
744  d += 3;
745  }
746  } else tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
747  if (mc.op == mf_comp::GRAD || mc.op == mf_comp::HESS) {
748  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
749  }
750  if (mc.op == mf_comp::HESS) {
751  tsz = add_dim(rng, dim_type(d++), stride_type(tsz), tref);
752  }
753  }
754  return tsz;
755  }
756 
757  void update_shape_with_inline_reduction(size_type cv) {
758  fallback_red_uptodate = false;
759  icb.tensor_bases.resize(mfcomp.size()); /* todo : resize(nb_mfcomp_not_data) */
760  icb.red.clear();
761  for (size_type i=0; i < mfcomp.size(); ++i) {
762  tensor_ref tref;
763  tensor_ranges rng;
764  unsigned d = 0;
765  mfcomp[i].push_back_dimensions(cv,rng);
766  push_back_mfcomp_dimensions(cv,mfcomp[i], d, rng, tref);
767  assert(tref.ndim() == rng.size() && d == rng.size());
768  if (mfcomp[i].reduction.size() == 0)
769  mfcomp[i].reduction.insert(size_type(0), tref.ndim(), ' ');
770  if (mfcomp[i].op != mf_comp::DATA) /* should already have the correct base */
771  tref.set_base(icb.tensor_bases[i]);
772  tref.update_idx2mask();
773  if (mfcomp[i].reduction.size() != tref.ndim()) {
774  ASM_THROW_TENSOR_ERROR("wrong number of indices for the "<< int(i+1)
775  << "th argument of the reduction "<< name()
776  << " (expected " << int(tref.ndim())
777  << " indices, got "
778  << mfcomp[i].reduction.size() << ")");
779  }
780  icb.red.insert(tref, mfcomp[i].reduction);
781  }
782  icb.red.prepare();
783  icb.red.result(tensor());
784  r_.resize(tensor().ndim());
785  for (dim_type i=0; i < tensor().ndim(); ++i) r_[i] = tensor().dim(i);
786  tsize = tensor().card();
787  //cerr << "update_shape_with_inline_reduction: tensor=" << tensor()
788  // << "\nr_=" << r_ << ", tsize=" << tsize << "\n";
789  }
790 
791  void update_shape_with_expanded_tensor(size_type cv) {
792  icb.red.clear();
793  unsigned d = 0;
794  for (size_type i=0; i < mfcomp.size(); ++i) {
795  tsize = stride_type(push_back_mfcomp_dimensions(cv, mfcomp[i], d, r_, tensor(), tsize));
796  }
797  assert(d == r_.size());
798  tensor().update_idx2mask();
799  }
800 
801  void check_shape_update(size_type cv, dim_type) {
802  const mesh_im& mi = mfcomp.get_im();
803  pintegration_method pim2;
804  bgeot::pgeometric_trans pgt2;
805  bool fem_changed = false;
806  pgt2 = mi.linked_mesh().trans_of_convex(cv);
807  pim2 = mi.int_method_of_element(cv);
808  // cerr << "computed tensor cv=" << cv << " f=" << int(face) << "\n";
809  /* shape is considered for update only if the FEM changes,
810  changes of pgt or integration method do not affect the shape
811  (only the mat_elem) */
812  cv_shape_update = cv;
813  shape_updated_ = (pme == 0); //false;
814  for (size_type i=0; i < nchilds(); ++i)
815  shape_updated_ = shape_updated_ || child(i).is_shape_updated();
816  for (size_type i=0; shape_updated_ == false && i < mfcomp.size(); ++i) {
817  if (mfcomp[i].pmf == 0) continue;
818  if (current_cv == size_type(-1)) {
819  shape_updated_ = true; fem_changed = true;
820  } else {
821  fem_changed = fem_changed ||
822  (mfcomp[i].pmf->fem_of_element(current_cv) !=
823  mfcomp[i].pmf->fem_of_element(cv));
824  /* for FEM with non-constant nb_dof.. */
825  shape_updated_ = shape_updated_ ||
826  (mfcomp[i].pmf->nb_basic_dof_of_element(current_cv) !=
827  mfcomp[i].pmf->nb_basic_dof_of_element(cv));
828  }
829  }
830  if (shape_updated_) {
831  r_.resize(0);
832  /* get the new ranges */
833  for (unsigned i=0; i < mfcomp.size(); ++i)
834  mfcomp[i].push_back_dimensions(cv, r_, true);
835  }
836  if (fem_changed || shape_updated_) {
837  /* build the new mat_elem structure */
838  update_pmat_elem(cv);
839  }
840  if (shape_updated_ || fem_changed || pgt != pgt2 || pim != pim2) {
841  pgt = pgt2; pim = pim2;
842  pmec = mep(pme, pim, pgt, has_inline_reduction);
843  }
844  }
845 
846  void reinit_() {
847  if (!shape_updated_) return;
848  tensor().clear();
849  tsize = 1;
850  if (has_inline_reduction) {
851  update_shape_with_inline_reduction(cv_shape_update);
852  } else {
853  update_shape_with_expanded_tensor(cv_shape_update);
854  }
855  data_base = 0;
856  tensor().set_base(data_base);
857  }
858  void update_childs_required_shape() {
859  }
860 
861  /* fallback when inline reduction is not possible:
862  do the reduction after evaluation of the mat_elem */
863  void do_post_reduction(size_type cv) {
864  if (!fallback_red_uptodate) {
865  fallback_red_uptodate = true;
866  std::string s;
867  size_type sz = 1;
868  tensor_ref tref; tensor_ranges r;
869  unsigned cnt, d=0;
870  /* insert the tensorial product of Grad() etc */
871  for (cnt=0; cnt < mfcomp.size() && mfcomp[cnt].op != mf_comp::DATA; ++cnt) {
872  mfcomp[cnt].push_back_dimensions(cv,r);
873  sz = push_back_mfcomp_dimensions(cv, mfcomp[cnt], d, r, tref, sz);
874  s += mfcomp[cnt].reduction;
875  }
876  fallback_red.clear();
877  tref.set_base(fallback_base);
878  fallback_red.insert(tref, s);
879  /* insert the optional data tensors */
880  for ( ; cnt < mfcomp.size(); ++cnt) {
881  assert(mfcomp[cnt].op == mf_comp::DATA);
882  fallback_red.insert(mfcomp[cnt].data->tensor(), mfcomp[cnt].reduction);
883  }
884  fallback_red.prepare();
885  fallback_red.result(tensor()); /* this SHOULD NOT, IN ANY CASE change the shape or strides of tensor() .. */
886  assert(icb.red.reduced_range == fallback_red.reduced_range);
887  }
888  icb.resize_t(t);
889  fallback_base = &(*t.begin());
890  fallback_red.do_reduction();
891  }
892 
893  void exec_(size_type cv, dim_type face) {
894  const mesh_im& mim = mfcomp.get_im();
895  for (unsigned i=0; i < mfcomp.size(); ++i) {
896  if (mfcomp[i].op == mf_comp::DATA) {
897  size_type fullsz = 1;
898  for (unsigned j=0; j < mfcomp[i].data->ranges().size(); ++j)
899  fullsz *= mfcomp[i].data->ranges()[j];
900  if (fullsz != size_type(mfcomp[i].data->tensor().card()))
901  ASM_THROW_TENSOR_ERROR("aaarg inline reduction will explode with non-full tensors. "
902  "Complain to the author, I was too lazy to do that properly");
903  }
904  }
905  icb.was_called = false;
906  if (face == dim_type(-1)) {
907  pmec->gen_compute(t, mim.linked_mesh().points_of_convex(cv), cv,
908  has_inline_reduction ? &icb : 0);
909  } else {
910  pmec->gen_compute_on_face(t, mim.linked_mesh().points_of_convex(cv), face, cv,
911  has_inline_reduction ? &icb : 0);
912  }
913 
914  if (has_inline_reduction && icb.was_called == false) {
915  do_post_reduction(cv);
916  data_base = &fallback_red.out_data[0];
917  } else data_base = &(*t.begin());
918  GMM_ASSERT1(t.size() == size_type(tsize),
919  "Internal error: bad size " << t.size() << " should be " << tsize);
920  }
921  };
922 
923 
924  /* extract data for each dof of the convex */
925  class ATN_tensor_from_dofs_data : public ATN_tensor_w_data {
926  const base_asm_data *basm; //scalar_type* global_array;
927  vdim_specif_list vdim;
928  multi_tensor_iterator mti;
929  tensor_ranges e_r;
930  std::vector< tensor_strides > e_str;
931  public:
932  ATN_tensor_from_dofs_data(const base_asm_data *basm_,
933  const vdim_specif_list& d) :
934  basm(basm_), vdim(d) {
935  }
936  void check_shape_update(size_type cv, dim_type) {
937  shape_updated_ = false;
938  r_.resize(vdim.size());
939  for (dim_type i=0; i < vdim.size(); ++i) {
940  if (vdim[i].is_mf_ref()) {
941  size_type nbde = vdim[i].pmf->nb_basic_dof_of_element(cv);
942  if (nbde != ranges()[i])
943  { r_[i] = unsigned(nbde); shape_updated_ = true; }
944  } else if (vdim[i].dim != ranges()[i]) {
945  r_[i] = unsigned(vdim[i].dim);
946  shape_updated_ = true;
947  }
948  }
949  }
950  virtual void init_required_shape() { req_shape = tensor_shape(ranges()); }
951 
952  private:
953  void reinit_() {
954  ATN_tensor_w_data::reinit_();
955  mti.assign(tensor(), true);
956  }
957  void exec_(size_type cv, dim_type ) {
958  vdim.build_strides_for_cv(cv, e_r, e_str);
959  assert(e_r == ranges());
960  mti.rewind();
961  basm->copy_with_mti(e_str, mti, (vdim.nb_mf() >= 1) ? vdim[0].pmf : 0);
962  }
963  };
964 
965  /* enforce symmetry of a 2D tensor
966  (requiring only the upper-triangle of its child and
967  duplicating it) */
968  class ATN_symmetrized_tensor : public ATN_tensor_w_data {
969  multi_tensor_iterator mti;
970  public:
971  ATN_symmetrized_tensor(ATN_tensor& a) { add_child(a); }
972  void check_shape_update(size_type , dim_type) {
973  if ((shape_updated_ = child(0).is_shape_updated())) {
974  if (child(0).ranges().size() != 2 ||
975  child(0).ranges()[0] != child(0).ranges()[1])
976  ASM_THROW_TENSOR_ERROR("can't symmetrize a non-square tensor "
977  "of sizes " << child(0).ranges());
978  r_ = child(0).ranges();
979  }
980  }
981  void update_childs_required_shape() {
982  tensor_shape ts = req_shape;
983  tensor_shape ts2 = req_shape;
984  index_set perm(2); perm[0] = 1; perm[1] = 0; ts2.permute(perm);
985  ts.merge(ts2, false);
986  tensor_mask dm; dm.set_triangular(ranges()[0],0,1);
987  tensor_shape tsdm(2); tsdm.push_mask(dm);
988  ts.merge(tsdm, true);
989  child(0).merge_required_shape(ts);
990  }
991 
992  private:
993  void reinit_() {
994  req_shape.set_full(ranges()); // it is simpler this way
995  ATN_tensor_w_data::reinit0();
996  mti.assign(child(0).tensor(),true);
997  }
998  void exec_(size_type, dim_type) {
999  std::fill(data.begin(), data.end(), 0.);
1000  mti.rewind();
1001  index_type n = ranges()[0];
1002  do {
1003  index_type i=mti.index(0), j=mti.index(1);
1004  data[i*n+j]=data[j*n+i]=mti.p(0);
1005  } while (mti.qnext1());
1006  }
1007  };
1008 
1009 
1010  template<class UnaryOp> class ATN_unary_op_tensor
1011  : public ATN_tensor_w_data {
1012  multi_tensor_iterator mti;
1013  public:
1014  ATN_unary_op_tensor(ATN_tensor& a) { add_child(a); }
1015  void check_shape_update(size_type , dim_type) {
1016  if ((shape_updated_ = (ranges() != child(0).ranges())))
1017  r_ = child(0).ranges();
1018  }
1019  private:
1020  void reinit_() {
1021  ATN_tensor_w_data::reinit0();
1022  mti.assign(tensor(), child(0).tensor(),false);
1023  }
1024  void update_cv_(size_type, dim_type) {
1025  mti.rewind();
1026  do {
1027  mti.p(0) = UnaryOp()(mti.p(1));
1028  } while (mti.qnext2());
1029  }
1030  };
1031 
1032  /* sum AND scalar scaling */
1033  class ATN_tensors_sum_scaled : public ATN_tensor_w_data {
1034  std::vector<multi_tensor_iterator> mti;
1035  std::vector<scalar_type> scales; /* useful for "scaled" sums such as 0.5*t1 + 0.5*t2 */
1036  public:
1037  ATN_tensors_sum_scaled(ATN_tensor& t1, scalar_type s1) {
1038  add_child(t1);
1039  scales.resize(1); scales[0]=s1;
1040  }
1041  void push_scaled_tensor(ATN_tensor& t, scalar_type s) {
1042  add_child(t); scales.push_back(s);
1043  }
1044  void check_shape_update(size_type , dim_type) {
1045  if ((shape_updated_ = child(0).is_shape_updated()))
1046  r_ = child(0).ranges();
1047  for (size_type i=1; i < nchilds(); ++i)
1048  if (ranges() != child(i).ranges())
1049  ASM_THROW_TENSOR_ERROR("can't add two tensors of sizes " <<
1050  ranges() << " and " << child(i).ranges());
1051  }
1052  void apply_scale(scalar_type s) {
1053  for (size_type i=0; i < scales.size(); ++i) scales[i] *= s;
1054  }
1055  ATN_tensors_sum_scaled* is_tensors_sum_scaled() { return this; }
1056  private:
1057  void reinit_() {
1058  ATN_tensor_w_data::reinit0();
1059  mti.resize(nchilds());
1060  for (size_type i=0; i < nchilds(); ++i)
1061  mti[i].assign(tensor(), child(i).tensor(),false);
1062  }
1063  void exec_(size_type, dim_type) {
1064  //if (cv == 0) {
1065  // cerr << "ATN_tensors_sum["<< name() << "] req_shape="
1066  // << req_shape << endl;
1067  //}
1068  std::fill(data.begin(), data.end(), 0.);
1069  mti[0].rewind();
1070  do {
1071  mti[0].p(0) = mti[0].p(1)*scales[0];
1072  } while (mti[0].qnext2());
1073  for (size_type i=1; i < nchilds(); ++i) {
1074  mti[i].rewind();
1075  do {
1076  mti[i].p(0) = mti[i].p(0)+mti[i].p(1)*scales[i];
1077  } while (mti[i].qnext2());
1078  }
1079  }
1080  };
1081 
1082  class ATN_tensor_scalar_add : public ATN_tensor_w_data {
1083  scalar_type v;
1084  multi_tensor_iterator mti;
1085  int sgn; /* v+t or v-t ? */
1086  public:
1087  ATN_tensor_scalar_add(ATN_tensor& a, scalar_type v_, int sgn_) :
1088  v(v_), sgn(sgn_) { add_child(a); }
1089  void check_shape_update(size_type , dim_type) {
1090  if ((shape_updated_ = child(0).is_shape_updated()))
1091  r_ = child(0).ranges();
1092  }
1093  private:
1094  void reinit_() {
1095  ATN_tensor_w_data::reinit_();
1096  mti.assign(tensor(), child(0).tensor(),false);
1097  }
1098  void exec_(size_type, dim_type) {
1099  std::fill(data.begin(), data.end(), v);
1100  mti.rewind();
1101  do {
1102  if (sgn > 0)
1103  mti.p(0) += mti.p(1);
1104  else mti.p(0) -= mti.p(1);
1105  } while (mti.qnext2());
1106  }
1107  };
1108 
1109  class ATN_print_tensor : public ATN {
1110  std::string name;
1111  public:
1112  ATN_print_tensor(ATN_tensor& a, std::string n_)
1113  : name(n_) { add_child(a); }
1114  private:
1115  void reinit_() {}
1116  void exec_(size_type cv, dim_type face) {
1117  multi_tensor_iterator mti(child(0).tensor(), true);
1118  cout << "------- > evaluation of " << name << ", at" << endl;
1119  cout << "convex " << cv;
1120  if (face != dim_type(-1)) cout << ", face " << int(face);
1121  cout << endl;
1122  cout << " size = " << child(0).ranges() << endl;
1123  mti.rewind();
1124  do {
1125  cout << " @[";
1126  for (size_type i=0; i < child(0).ranges().size(); ++i)
1127  cout <<(i>0 ? "," : "") << mti.index(dim_type(i));
1128  cout << "] = " << mti.p(0) << endl;
1129  } while (mti.qnext1());
1130  }
1131  };
1132 
1133 
1134  /*
1135  -------------------
1136  analysis of the supplied string
1137  -----------------
1138  */
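  /* Usage sketch (illustrative, not part of this file): the parser below
     handles assembly descriptions such as the classical stiffness matrix
     assembly

       getfem::generic_assembly assem;
       assem.push_im(mim);   // referenced as %1
       assem.push_mf(mf);    // referenced as #1
       assem.push_mat(K);    // the output matrix M (first one pushed)
       assem.set("M(#1,#1) += sym(comp(Grad(#1).Grad(#1))(:,i,:,i))");
       assem.assembly();

     mim, mf and K are assumed to be an existing mesh_im, mesh_fem and
     sparse matrix respectively. */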
1139 
1140  std::string asm_tokenizer::syntax_err_print() {
1141  std::string s;
1142  if (tok_pos - err_msg_mark > 80) err_msg_mark = tok_pos - 40;
1143  if (str.length() - err_msg_mark < 80) s = tok_substr(err_msg_mark, str.length());
1144  else { s = tok_substr(err_msg_mark,err_msg_mark+70); s.append(" ... (truncated)"); }
1145  s += "\n" + std::string(std::max(int(tok_pos - err_msg_mark),0), '-') + "^^";
1146  return s;
1147  }
1148 
1149  void asm_tokenizer::get_tok() {
1150  standard_locale sl;
1151  curr_tok_ival = -1;
1152  while (tok_pos < str.length() && isspace(str[tok_pos])) ++tok_pos;
1153  if (tok_pos == str.length()) {
1154  curr_tok_type = END; tok_len = 0;
1155  } else if (strchr("{}(),;:=-.*/+", str[tok_pos])) {
1156  curr_tok_type = tok_type_enum(str[tok_pos]); tok_len = 1;
1157  } else if (str[tok_pos] == '$' || str[tok_pos] == '#' || str[tok_pos] == '%') {
1158  curr_tok_type = str[tok_pos] == '$' ? ARGNUM_SELECTOR :
1159  (str[tok_pos] == '#' ? MFREF : IMREF);
1160  tok_len = 1;
1161  curr_tok_ival = 0;
1162  while (isdigit(str[tok_pos+tok_len])) {
1163  curr_tok_ival*=10;
1164  curr_tok_ival += str[tok_pos+tok_len] - '0';
1165  ++tok_len;
1166  }
1167  curr_tok_ival--;
1168  } else if (isalpha(str[tok_pos])) {
1169  curr_tok_type = IDENT;
1170  tok_len = 0;
1171  while (isalnum(str[tok_pos+tok_len]) || str[tok_pos+tok_len] == '_') ++tok_len;
1172  } else if (isdigit(str[tok_pos])) {
1173  curr_tok_type = NUMBER;
1174  char *p;
1175  curr_tok_dval = strtod(&str[0]+tok_pos, &p);
1176  tok_len = p - &str[0] - tok_pos;
1177  }
1178  if (tok_pos < str.length())
1179  curr_tok = str.substr(tok_pos, tok_len);
1180  else
1181  curr_tok.clear();
1182  }
1183 
1184  const mesh_fem& generic_assembly::do_mf_arg_basic() {
1185  if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem reference");
1186  if (tok_mfref_num() >= mftab.size())
1187  ASM_THROW_PARSE_ERROR("reference to a non-existent mesh_fem #" << tok_mfref_num()+1);
1188  const mesh_fem& mf_ = *mftab[tok_mfref_num()]; advance();
1189  return mf_;
1190  }
1191 
1192  const mesh_fem& generic_assembly::do_mf_arg(std::vector<const mesh_fem*> * multimf) {
1193  if (!multimf) advance(); // special hack for NonLin$i(#a,#b,..)
1194  accept(OPEN_PAR,"expecting '('");
1195  const mesh_fem &mf_ = do_mf_arg_basic();
1196  if (multimf) {
1197  multimf->resize(1); (*multimf)[0] = &mf_;
1198  while (advance_if(COMMA)) {
1199  if (tok_type() != MFREF) ASM_THROW_PARSE_ERROR("expecting mesh_fem reference");
1200  if (tok_mfref_num() >= mftab.size())
1201  ASM_THROW_PARSE_ERROR("reference to a non-existent mesh_fem #" << tok_mfref_num()+1);
1202  multimf->push_back(mftab[tok_mfref_num()]); advance();
1203  }
1204  }
1205  accept(CLOSE_PAR, "expecting ')'");
1206  return mf_;
1207  }
1208 
1209  /* "inline" reduction operations inside comp(..) */
1210  std::string generic_assembly::do_comp_red_ops() {
1211  std::string s;
1212  if (advance_if(OPEN_PAR)) {
1213  size_type j = 0;
1214  do {
1215  if (tok_type() == COLON) {
1216  s.push_back(' '); advance(); j++;
1217  } else if (tok_type() == IDENT) {
1218  if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) {
1219  s.push_back(tok()[0]); advance(); j++;
1220  } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() <<
1221  "', only lower case characters allowed");
1222  }
1223  } while (advance_if(COMMA));
1224  accept(CLOSE_PAR, "expecting ')'");
1225  }
1226  return s;
1227  }
1228 
1229  static mf_comp::field_shape_type get_shape(const std::string &s) {
1230  if (s[0] == 'v') return mf_comp::VECTORIZED_SHAPE;
1231  else if (s[0] == 'm') return mf_comp::MATRIXIZED_SHAPE;
1232  else return mf_comp::PLAIN_SHAPE;
1233  }
1234 
1235  ATN_tensor* generic_assembly::do_comp() {
1236  accept(OPEN_PAR, "expecting '('");
1237  mf_comp_vect what;
1238  bool in_data = false;
1239  /* the first optional argument is the "main" mesh_im, i.e. the one
1240  whose integration methods are used, (and whose linked_mesh is
1241  used for mf_comp::NORMAL, mf_comp::GRADGT etc computations). If
1242  not given, then the first mesh_im pushed is used (then expect
1243  problems when assembling simultaneously on two different
1244  meshes).
1245  */
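  /* e.g. (illustrative): "comp(%2, Base(#1).Base(#2))" selects the second
     mesh_im that was pushed with push_im(). */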
1246  if (tok_type() == IMREF) {
1247  if (tok_imref_num() >= imtab.size())
1248  ASM_THROW_PARSE_ERROR("reference to a non-existent mesh_im %" << tok_imref_num()+1);
1249  what.set_im(*imtab[tok_imref_num()]); advance();
1250  accept(COMMA, "expecting ','");
1251  } else {
1252  what.set_im(*imtab[0]);
1253  }
1254  do {
1255  if (tok_type() == CLOSE_PAR) break;
1256  if (tok_type() != IDENT) ASM_THROW_PARSE_ERROR("expecting Base or Grad or Hess, Normal, etc..");
1257  std::string f = tok();
1258  const mesh_fem *pmf = 0;
1259  if (f.compare("Base")==0 || f.compare("vBase")==0 || f.compare("mBase")==0) {
1260  pmf = &do_mf_arg();
1261  what.push_back(mf_comp(&what, pmf, mf_comp::BASE, get_shape(f)));
1262  } else if (f.compare("Grad")==0 || f.compare("vGrad")==0 || f.compare("mGrad")==0) {
1263  pmf = &do_mf_arg();
1264  what.push_back(mf_comp(&what, pmf, mf_comp::GRAD, get_shape(f)));
1265  } else if (f.compare("Hess")==0 || f.compare("vHess")==0 || f.compare("mHess")==0) {
1266  pmf = &do_mf_arg();
1267  what.push_back(mf_comp(&what, pmf, mf_comp::HESS, get_shape(f)));
1268  } else if (f.compare("NonLin")==0) {
1269  size_type num = 0; /* default value */
1270  advance();
1271  if (tok_type() == ARGNUM_SELECTOR) { num = tok_argnum(); advance(); }
1272  if (num >= innonlin.size()) ASM_THROW_PARSE_ERROR("NonLin$" << num << " does not exist");
1273  std::vector<const mesh_fem*> allmf;
1274  pmf = &do_mf_arg(&allmf); what.push_back(mf_comp(&what, allmf, innonlin[num]));
1275  } else if (f.compare("Normal") == 0) {
1276  advance();
1277  accept(OPEN_PAR,"expecting '('"); accept(CLOSE_PAR,"expecting ')'");
1278  pmf = 0; what.push_back(mf_comp(&what, pmf, mf_comp::NORMAL, mf_comp::PLAIN_SHAPE));
1279  } else if (f.compare("GradGT") == 0 ||
1280  f.compare("GradGTInv") == 0) {
1281  advance();
1282  accept(OPEN_PAR,"expecting '('"); accept(CLOSE_PAR,"expecting ')'");
1283  pmf = 0;
1284  what.push_back(mf_comp(&what, pmf,
1285  f.compare("GradGT") == 0 ?
1286  mf_comp::GRADGT :
1287  mf_comp::GRADGTINV, mf_comp::PLAIN_SHAPE));
1288  } else {
1289  if (vars.find(f) != vars.end()) {
1290  what.push_back(mf_comp(&what, vars[f]));
1291  in_data = true;
1292  advance();
1293  } else {
1294  ASM_THROW_PARSE_ERROR("expecting Base, Grad, vBase, NonLin ...");
1295  }
1296  }
1297 
1298  if (!in_data && f[0] != 'v' && f[0] != 'm' && pmf && pmf->get_qdim() != 1 && f.compare("NonLin")!=0) {
1299  ASM_THROW_PARSE_ERROR("Attempt to use a vector mesh_fem as a scalar mesh_fem");
1300  }
1301  what.back().reduction = do_comp_red_ops();
1302  } while (advance_if(PRODUCT));
1303  accept(CLOSE_PAR, "expecting ')'");
1304 
1305  return record(std::make_unique<ATN_computed_tensor>(what));
1306  }
1307 
1308  void generic_assembly::do_dim_spec(vdim_specif_list& lst) {
1309  lst.resize(0);
1310  accept(OPEN_PAR, "expecting '('");
1311  while (true) {
1312  if (tok_type() == IDENT) {
1313  if (tok().compare("mdim")==0) lst.push_back(vdim_specif(do_mf_arg().linked_mesh().dim()));
1314  else if (tok().compare("qdim")==0) lst.push_back(vdim_specif(do_mf_arg().get_qdim()));
1315  else ASM_THROW_PARSE_ERROR("expecting mdim(#mf) or qdim(#mf) or a number or a mesh_fem #id");
1316  } else if (tok_type() == NUMBER) {
1317  lst.push_back(vdim_specif(tok_number_ival()+1));
1318  advance();
1319  } else if (tok_type() == MFREF) {
1320  lst.push_back(vdim_specif(&do_mf_arg_basic()));
1321  } else if (tok_type() != CLOSE_PAR) ASM_THROW_PARSE_ERROR("expecting mdim(#mf) or qdim(#mf) or a number or a mesh_fem #id");
1322  /* if (mfcnt && !lst.back().is_mf_ref())
1323  ASM_THROW_PARSE_ERROR("#mf argument must be given after numeric dimensions");*/
1324  if (advance_if(CLOSE_PAR)) break;
1325  accept(COMMA,"expecting ',' or ')'");
1326  }
1327  }
1328 
1329 
1330  ATN_tensor* generic_assembly::do_data() {
1331  // ATN_tensor *t;
1332  size_type datanum = 0; /* default */
1333  if (tok_type() != OPEN_PAR) { /* the dataset number may be omitted */
1334  if (tok_type() != ARGNUM_SELECTOR)
1335  ASM_THROW_PARSE_ERROR("expecting dataset number");
1336  datanum = tok_argnum();
1337  advance();
1338  }
1339  if (datanum >= indata.size())
1340  ASM_THROW_PARSE_ERROR("wrong dataset number: " << datanum);
1341 
1342  vdim_specif_list sz;
1343  do_dim_spec(sz);
1344 
1345  if (sz.nbelt() != indata[datanum]->vect_size())
1346  ASM_THROW_PARSE_ERROR("invalid size for data argument " << datanum+1 <<
1347  " real size is " << indata[datanum]->vect_size()
1348  << " expected size is " << sz.nbelt());
1349  return record(std::make_unique<ATN_tensor_from_dofs_data>(indata[datanum].get(), sz));
1350  }
1351 
1352  std::pair<ATN_tensor*, std::string>
1353  generic_assembly::do_red_ops(ATN_tensor* t) {
1354  std::string s;
1355 
1356  if (advance_if(OPEN_PAR)) {
1357  size_type j = 0;
1358  do {
1359  if (tok_type() == COLON) {
1360  s.push_back(' '); advance(); j++;
1361  } else if (tok_type() == NUMBER) {
1362  t = record(std::make_unique<ATN_sliced_tensor>(*t, dim_type(j),
1363  tok_number_ival())); advance();
1364  } else if (tok_type() == IDENT) {
1365  if ((tok().length()==1 && isalpha(tok()[0])) || islower(tok()[0])) {
1366  s.push_back(tok()[0]); advance(); j++;
1367  } else ASM_THROW_PARSE_ERROR("invalid reduction index '" << tok() <<
1368  "', only lower case chars allowed");
1369  }
1370  } while (advance_if(COMMA));
1371  accept(CLOSE_PAR, "expecting ')'");
1372  }
1373  return std::pair<ATN_tensor*,std::string>(t,s);
1374  }
1375 
1376  /*
1377  ( expr )
1378  variable
1379  comp(..)
1380  data(data)
1381  */
1382  tnode generic_assembly::do_tens() {
1383  tnode t;
1384  push_mark();
1385  if (advance_if(OPEN_PAR)) {
1386  t = do_expr();
1387  accept(CLOSE_PAR, "expecting ')'");
1388  } else if (tok_type() == NUMBER) {
1389  t.assign(tok_number_dval()); advance();
1390  } else if (tok_type() == IDENT) {
1391  if (vars.find(tok()) != vars.end()) {
1392  t.assign(vars[tok()]); advance();
1393  } else if (tok().compare("comp")==0) {
1394  advance(); t.assign(do_comp());
1395  } else if (tok().compare("data")==0) {
1396  advance(); t.assign(do_data());
1397  } else if (tok().compare("sym")==0) {
1398  advance();
1399  tnode t2 = do_expr();
1400  if (t2.type() != tnode::TNTENSOR)
1401  ASM_THROW_PARSE_ERROR("can't symmetrise a scalar!");
1402  t.assign(record(std::make_unique<ATN_symmetrized_tensor>(*t2.tensor())));
1403  } else ASM_THROW_PARSE_ERROR("unknown identifier: " << tok());
1404  } else ASM_THROW_PARSE_ERROR("unexpected token: " << tok());
1405  pop_mark();
1406  return t;
1407  }
1408 
1409  /*
1410  handle tensorial product/reduction
1411 
1412  a(:,i).b(j,i).(c)(1,:,i)
1413  */
1414  tnode generic_assembly::do_prod() {
1415  reduced_tensor_arg_type ttab;
1416 
1417  do {
1418  tnode t = do_tens();
1419  if (t.type() == tnode::TNCONST) {
1420  if (ttab.size() == 0) return t;
1421  else ASM_THROW_PARSE_ERROR("can't mix tensor and scalar into a "
1422  "reduction expression");
1423  }
1424  ttab.push_back(do_red_ops(t.tensor()));
1425  } while (advance_if(PRODUCT));
1426  if (ttab.size() == 1 && ttab[0].second.length() == 0) {
1427  return tnode(ttab[0].first);
1428  } else {
1429  return tnode(record(std::make_unique<ATN_reduced_tensor>(ttab)));
1430  }
1431  }
1432 
1433  /* calls do_prod() once,
1434  and handle successive reordering/diagonals transformations */
1435  tnode generic_assembly::do_prod_trans() {
1436  tnode t = do_prod();
1437  while (advance_if(OPEN_BRACE)) {
1438  index_set reorder;
1439  size_type j = 0;
1440  dal::bit_vector check_permut;
1441  if (t.type() != tnode::TNTENSOR)
1442  ASM_THROW_PARSE_ERROR("can't use reorder braces on a constant!");
1443  for (;; ++j) {
1444  size_type i;
1445  if (tok_type() == COLON) i = j;
1446  else if (tok_type() == NUMBER) i = tok_number_ival(1000);
1447  else ASM_THROW_PARSE_ERROR("only numbers or colons allowed here");
1448  if (check_permut.is_in(i)) { /* take the diagonal of the tensor */
1449  t = tnode(record(std::make_unique<ATN_diagonal_tensor>(*t.tensor(), dim_type(i),
1450  dim_type(j))));
1451  check_permut.add(j);
1452  reorder.push_back(dim_type(j));
1453  } else {
1454  check_permut.add(i);
1455  reorder.push_back(dim_type(i));
1456  }
1457  advance();
1458  if (advance_if(CLOSE_BRACE)) break;
1459  accept(COMMA, "expecting ','");
1460  }
1461  if (check_permut.first_false() != reorder.size()) {
1462  cerr << check_permut << endl;
1463  cerr << vref(reorder) << endl;
1464  ASM_THROW_PARSE_ERROR("you did not give a real permutation:"
1465  << vref(reorder));
1466  }
1467  t = tnode(record(std::make_unique<ATN_permuted_tensor>(*t.tensor(), reorder)));
1468  }
1469  return t;
1470  }
1471 
1472  /*
1473  term := prod_trans*prod_trans/prod_trans ...
1474  */
1475  tnode generic_assembly::do_term() {
1476  push_mark();
1477  err_set_mark();
1478  tnode t = do_prod_trans();
1479  while (true) {
1480  bool mult;
1481  if (advance_if(MULTIPLY)) mult = true;
1482  else if (advance_if(DIVIDE)) mult = false;
1483  else break;
1484  tnode t2 = do_prod();
1485  if (mult == false && t.type() == tnode::TNCONST
1486  && t2.type() == tnode::TNTENSOR)
1487  ASM_THROW_PARSE_ERROR("can't divide a constant by a tensor");
1488  if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
1489  ASM_THROW_PARSE_ERROR("tensor term-by-term product or division not "
1490  "implemented yet! Are you sure you need it?");
1491  } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
1492  if (mult)
1493  t.assign(t.xval()*t2.xval());
1494  else {
1495  t2.check0();
1496  t.assign(t.xval()/t2.xval());
1497  }
1498  } else {
1499  if (t.type() != tnode::TNTENSOR) std::swap(t,t2);
1500  scalar_type v = t2.xval();
1501  if (!mult) {
1502  if (v == 0.) { ASM_THROW_PARSE_ERROR("can't divide by zero"); }
1503  else v = 1./v;
1504  }
1505  if (t.tensor()->is_tensors_sum_scaled() && !t.tensor()->is_frozen()) {
1506  t.tensor()->is_tensors_sum_scaled()->apply_scale(v);
1507  } else {
1508  t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), v)));
1509  }
1510  }
1511  }
1512  pop_mark();
1513  return t;
1514  }
1515 
1516  /*
1517  expr := term + term - term + ...
1518  suboptimal for things like t1+1-2-1 (which gives (((t1+1)-2)-1) )
1519  ... could be fixed but no one needs that, I guess
1520  */
1521  tnode generic_assembly::do_expr() {
1522  bool negt=false;
1523  push_mark();
1524  if (advance_if(MINUS)) negt = true;
1525  tnode t = do_term();
1526  if (negt) {
1527  if (t.type() == tnode::TNCONST) t.assign(-t.xval());
1528  else t.assign(record(std::make_unique<ATN_tensor_scalar_add>(*t.tensor(), 0., -1)));
1529  }
1530  while (true) {
1531  int plus;
1532  if (advance_if(PLUS)) plus = +1;
1533  else if (advance_if(MINUS)) plus = -1;
1534  else break;
1535  tnode t2 = do_term();
1536  if (t.type() == tnode::TNTENSOR && t2.type() == tnode::TNTENSOR) {
1537  if (!t.tensor()->is_tensors_sum_scaled() || t.tensor()->is_frozen()) {
1538  t.assign(record(std::make_unique<ATN_tensors_sum_scaled>(*t.tensor(), +1)));
1539  }
1540  t.tensor()->is_tensors_sum_scaled()
1541  ->push_scaled_tensor(*t2.tensor(), scalar_type(plus));
1542  } else if (t.type() == tnode::TNCONST && t2.type() == tnode::TNCONST) {
1543  t.assign(t.xval()+t2.xval()*plus);
1544  } else {
1545  int tsgn = 1;
1546  if (t.type() != tnode::TNTENSOR)
1547  { std::swap(t,t2); if (plus<0) tsgn = -1; }
1548  else if (plus<0) t2.assign(-t2.xval());
1549  t.assign(record(std::make_unique<ATN_tensor_scalar_add>(*t.tensor(), t2.xval(),
1550  tsgn)));
1551  }
1552  }
1553  pop_mark();
1554  return t;
1555  }
1556 
1557  /* instr := ident '=' expr |
1558  print expr |
1559  M(#mf,#mf) '+=' expr |
1560  V(#mf) '+=' expr */
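  /* examples of valid instructions (illustrative):
       t=comp(Base(#1).Base(#1));
       M(#1,#1)+=sym(comp(Grad(#1).Grad(#1))(:,i,:,i));
       V(#1)+=comp(Base(#1));
       print comp(Base(#1));                                              */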
1561  void generic_assembly::do_instr() {
1562  enum { wALIAS, wOUTPUT_ARRAY, wOUTPUT_MATRIX, wPRINT, wERROR }
1563  what = wERROR;
1564  std::string ident;
1565 
1566  /* get the lhs (the output identifier or alias name) */
1567  if (tok_type() != IDENT) ASM_THROW_PARSE_ERROR("expecting identifier");
1568  if (vars.find(tok()) != vars.end())
1569  ASM_THROW_PARSE_ERROR("redefinition of identifier " << tok());
1570 
1571  push_mark();
1572  ident = tok();
1573  advance();
1574 
1575  size_type print_mark = 0;
1576  size_type arg_num = size_type(-1);
1577 
1578  vdim_specif_list vds;
1579 
1580  if (ident.compare("print") == 0) {
1581  print_mark = tok_mark();
1582  what = wPRINT;
1583  } else if (tok_type() == ARGNUM_SELECTOR ||
1584  tok_type() == OPEN_PAR) {
1585  if (tok_type() == ARGNUM_SELECTOR) {
1586  arg_num = tok_argnum();
1587  advance();
1588  } else { arg_num = 0; }
1589 
1590  do_dim_spec(vds);
1591 
1592  /* check the validity of the output statement */
1593  if (ident.compare("V")==0) {
1594  what = wOUTPUT_ARRAY;
1595  if (arg_num >= outvec.size())
1596  { outvec.resize(arg_num+1); outvec[arg_num] = 0; }
1597  /* if we are allowed to dynamically create vectors */
1598  if (outvec[arg_num] == 0) {
1599  if (vec_fact != 0) {
1600  tensor_ranges r(vds.size());
1601  for (size_type i=0; i < vds.size(); ++i)
1602  r[i] = unsigned(vds[i].dim);
1603  outvec[arg_num] = std::shared_ptr<base_asm_vec>(std::shared_ptr<base_asm_vec>(), vec_fact->create_vec(r));
1604  }
1605  else ASM_THROW_PARSE_ERROR("output vector $" << arg_num+1
1606  << " does not exist");
1607  }
1608  } else if (vds.nb_mf()==2 && vds.size() == 2 && ident.compare("M")==0) {
1609  what = wOUTPUT_MATRIX;
1610  if (arg_num >= outmat.size())
1611  { outmat.resize(arg_num+1); outmat[arg_num] = 0; }
1612  /* if we are allowed to dynamically create matrices */
1613  if (outmat[arg_num] == 0) {
1614  if (mat_fact != 0)
1615  outmat[arg_num] = std::shared_ptr<base_asm_mat>
1616  (std::shared_ptr<base_asm_mat>(),
1617  mat_fact->create_mat(vds[0].pmf->nb_dof(),
1618  vds[1].pmf->nb_dof()));
1619  else ASM_THROW_PARSE_ERROR("output matrix $" << arg_num+1
1620  << " does not exist");
1621  }
1622  } else ASM_THROW_PARSE_ERROR("not a valid output statement");
1623 
1624  accept(PLUS);
1625  accept(EQUAL);
1626  } else if (advance_if(EQUAL)) {
1627  what = wALIAS;
1628  } else ASM_THROW_PARSE_ERROR("missing '=' or '+='");
1629 
1630  tnode t = do_expr();
1631  if (t.type() != tnode::TNTENSOR)
1632  ASM_THROW_PARSE_ERROR("right hand side is a constant, not a tensor!");
1633 
1634  switch (what) {
1635  case wPRINT: {
1636  record_out(std::make_unique<ATN_print_tensor>(*t.tensor(), tok_substr(print_mark,
1637  tok_mark())));
1638  } break;
1639  case wOUTPUT_ARRAY: {
1640  record_out(outvec[arg_num]->build_output_tensor(*t.tensor(), vds));
1641  } break;
1642  case wOUTPUT_MATRIX: {
1643  record_out(outmat[arg_num]->build_output_tensor(*t.tensor(),
1644  *vds[0].pmf,
1645  *vds[1].pmf));
1646  } break;
1647  case wALIAS: {
1648  vars[ident] = t.tensor(); t.tensor()->freeze();
1649  } break;
1650  default: GMM_ASSERT3(false, ""); break;
1651  }
1652  pop_mark();
1653  }
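  /* The four instruction forms handled above, with plausible example
     strings (the mesh_fem numbers #1/#2 and the comp() contents are
     placeholders chosen for illustration):

       t=comp(Base(#1))(:);                          alias        (wALIAS)
       print comp(Base(#1))(:);                      debug output (wPRINT)
       V(#1) += comp(Base(#1))(:);                   vector $1    (wOUTPUT_ARRAY)
       M$2(#1,#2) += comp(Base(#1).Base(#2))(:,:);   matrix $2    (wOUTPUT_MATRIX)

     The optional $k selector (ARGNUM_SELECTOR) picks the k-th output
     vector/matrix pushed by the caller; without it, $1 is assumed. */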
1654 
1655  struct atn_number_compare {
1656  bool operator()(const std::unique_ptr<ATN_tensor> &a,
1657  const std::unique_ptr<ATN_tensor> &b) {
1658  assert(a.get() && b.get());
1659  return (a->number() < b->number());
1660  }
1661  };
1662 
1663  struct outvar_compare {
1664  bool operator()(const std::unique_ptr<ATN> &a,
1665  const std::unique_ptr<ATN> &b) {
1666  assert(a.get() && b.get());
1667  return (a->number() < b->number());
1668  }
1669  };
1670 
1671  void generic_assembly::parse() {
1672  if (parse_done) return;
1673  do {
1674  if (tok_type() == END) break;
1675  do_instr();
1676  } while (advance_if(SEMICOLON));
1677  if (tok_type() != END) ASM_THROW_PARSE_ERROR("unexpected token: '"
1678  << tok() << "'");
1679  if (outvars.size() == 0) cerr << "warning: assembly without output\n";
1680 
1681  /* reordering of atn_tensors and outvars */
1682  unsigned gcnt = 0;
1683  for (size_type i=0; i < outvars.size(); ++i)
1684  outvars[i]->set_number(gcnt);
1685 
1686  std::sort(atn_tensors.begin(), atn_tensors.end(), atn_number_compare());
1687  std::sort(outvars.begin(), outvars.end(), outvar_compare());
1688 
1689  /* remove non-numbered (i.e. unused) atn_tensors */
1690  while (atn_tensors.size()
1691  && atn_tensors.back()->number() == unsigned(-1)) {
1692  cerr << "warning: the expression " << atn_tensors.back()->name()
1693  << " won't be evaluated since it is not used!\n";
1694  atn_tensors.pop_back();
1695  }
1696  parse_done = true;
1697  }
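  /* Consequence of the numbering/sorting above (example string only): in

       "t=comp(Base(#1))(:); u=comp(Grad(#1))(:,:); V(#1) += t;"

     only the nodes reachable from the output V(#1) receive a number, so
     after the sort the nodes built for the unused alias u sit at the back
     of atn_tensors and are removed with the warning above. */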
1698 
1699  /* caution: the order of the loops is really important */
1700  void generic_assembly::exec(size_type cv, dim_type face) {
1701  bool update_shapes = false;
1702  for (size_type i=0; i < atn_tensors.size(); ++i) {
1703  atn_tensors[i]->check_shape_update(cv,face);
1704  update_shapes = (update_shapes || atn_tensors[i]->is_shape_updated());
1705  /* if (atn_tensors[i]->is_shape_updated()) {
1706  cerr << "[cv=" << cv << ",f=" << int(face) << "], shape_updated: "
1707  << typeid(*atn_tensors[i]).name()
1708  << " [" << atn_tensors[i]->name()
1709  << "]\n -> r=" << atn_tensors[i]->ranges() << "\n ";
1710  }
1711  */
1712  }
1713 
1714  if (update_shapes) {
1715 
1716  /*cerr << "updated shapes: cv=" << cv << " face=" << int(face) << ": ";
1717  for (size_type k=0; k < mftab.size(); ++k)
1718  cerr << mftab[k]->nb_basic_dof_of_element(cv) << " "; cerr << "\n";
1719  */
1720 
1721  for (auto &&t : atn_tensors)
1722  t->init_required_shape();
1723 
1724  for (auto &&v : outvars)
1725  v->update_childs_required_shape();
1726 
1727  for (size_type i=atn_tensors.size()-1; i!=size_type(-1); --i)
1728  atn_tensors[i]->update_childs_required_shape();
1729 
1730  for (auto &&t : atn_tensors)
1731  t->reinit();
1732 
1733  for (auto &&v : outvars)
1734  v->reinit();
1735  }
1736  for (auto &&t : atn_tensors)
1737  t->exec(cv,face);
1738  for (auto &&v : outvars)
1739  v->exec(cv, face);
1740  }
1741 
1742  struct cv_fem_compare {
1743  const std::vector<const mesh_fem *> &mf;
1744  cv_fem_compare(const std::vector<const mesh_fem *>& mf_) : mf(mf_) {}
1745  bool operator()(size_type a, size_type b) const {
1746  for (size_type i=0; i < mf.size(); ++i) {
1747  pfem pfa(mf[i]->fem_of_element(a));
1748  pfem pfb(mf[i]->fem_of_element(b));
1749  /* sort by nb_dof and then by fem */
1750  unsigned nba = unsigned(pfa->nb_dof(a));
1751  unsigned nbb = unsigned(pfb->nb_dof(b));
1752  if (nba < nbb) {
1753  return true;
1754  } else if (nba > nbb) {
1755  return false;
1756  } else if (pfa != pfb) {
1757  return pfa < pfb;
1758  }
1759  }
1760  return false;
1761  }
1762  };
1763 
1764  /* reorder the convexes in order to minimize the number of
1765  shape modifications during the assembly (since this can be
1766  very expensive) */
1767  static void get_convex_order(const dal::bit_vector& cvlst,
1768  const std::vector<const mesh_im *>& imtab,
1769  const std::vector<const mesh_fem *>& mftab,
1770  const dal::bit_vector& candidates,
1771  std::vector<size_type>& cvorder) {
1772  cvorder.reserve(candidates.card()); cvorder.resize(0);
1773 
1774  for (dal::bv_visitor cv(candidates); !cv.finished(); ++cv) {
1775  if (cvlst.is_in(cv) &&
1776  imtab[0]->int_method_of_element(cv) != im_none()) {
1777  bool ok = true;
1778  for (size_type i=0; i < mftab.size(); ++i) {
1779  if (!mftab[i]->convex_index().is_in(cv)) {
1780  ok = false;
1781  // ASM_THROW_ERROR("the convex " << cv << " has no FEM for the #"
1782  // << i+1 << " mesh_fem");
1783  }
1784  }
1785  if (ok) {
1786  cvorder.push_back(cv);
1787  }
1788  } else if (!imtab[0]->linked_mesh().convex_index().is_in(cv)) {
1789  ASM_THROW_ERROR("the convex " << cv << " is not part of the mesh");
1790  } else {
1791  /* skip convexes without integration method */
1792  }
1793  }
1794  //std::sort(cvorder.begin(), cvorder.end(), cv_fem_compare(mftab));
1795  }
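  /* Example of the intended effect (hypothetical mesh): if the mesh_fems
     mix, say, FEM_PK(2,1) and FEM_PK(2,2) elements, enabling the
     std::sort call commented out above would group the convexes by
     (nb_dof, fem) thanks to cv_fem_compare, so that exec() rebuilds the
     tensor shapes only when passing from one group to the next instead of
     at nearly every convex of an interleaved ordering. */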
1796 
1797  void generic_assembly::consistency_check() {
1798  //if (mftab.size() == 0) ASM_THROW_ERROR("no mesh_fem for assembly!");
1799  if (imtab.size() == 0)
1800  ASM_THROW_ERROR("no mesh_im (integration methods) given for assembly!");
1801  const mesh& m = imtab[0]->linked_mesh();
1802  for (unsigned i=0; i < mftab.size(); ++i) {
1803  if (&mftab[i]->linked_mesh() != &m)
1804  ASM_THROW_ERROR("the mesh_fem/mesh_im live on different meshes!");
1805  }
1806  for (unsigned i=0; i < imtab.size(); ++i) {
1807  if (&imtab[i]->linked_mesh() != &m)
1808  ASM_THROW_ERROR("the mesh_fem/mesh_im live on different meshes!");
1809  }
1810  if (imtab.size() == 0)
1811  ASM_THROW_ERROR("no integration method !");
1812  }
1813 
1814  void generic_assembly::assembly(const mesh_region &r) {
1815  std::vector<size_type> cv;
1816  r.from_mesh(imtab.at(0)->linked_mesh());
1817  r.error_if_not_homogeneous();
1818 
1819 
1820  consistency_check();
1821  get_convex_order(imtab.at(0)->convex_index(), imtab, mftab, r.index(), cv);
1822  parse();
1823 
1824  for (size_type i=0; i < cv.size(); ++i) {
1825  mesh_region::face_bitset nf = r[cv[i]];
1826  dim_type f = dim_type(-1);
1827  while (nf.any())
1828  {
1829  if (nf[0]) exec(cv[i],f);
1830  nf >>= 1;
1831  f++;
1832  }
1833  }
1834  }
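  /* A minimal sketch of driving the code above from client code; the
     objects mim, mf and K are assumptions of the example (a valid mesh_im,
     a mesh_fem and a writable sparse matrix created by the caller), and
     the assembly string is the usual Laplacian stiffness expression.

       getfem::generic_assembly assem;
       assem.push_im(mim);     // integration method used by exec()
       assem.push_mf(mf);      // becomes #1 in the assembly string
       assem.push_mat(K);      // becomes M(#1,#1), i.e. output matrix $1
       assem.set("M(#1,#1) += sym(comp(Grad(#1).Grad(#1))(:,i,:,i))");
       assem.assembly();       // consistency_check(), parse(), then
                               // exec(cv,face) on every convex of the region
  */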
1835 } /* end of namespace */