  struct emelem_comp_key_ : virtual public dal::static_stored_object_key {
    pmat_elem_type pmt;
    pintegration_method ppi;
    bgeot::pgeometric_trans pgt;
    bool prefer_comp_on_real_element;
    bool compare(const static_stored_object_key &oo) const override {
      auto &o = dynamic_cast<const emelem_comp_key_ &>(oo);
      if (pmt < o.pmt) return true;
      if (o.pmt < pmt) return false;
      if (ppi < o.ppi) return true;
      if (o.ppi < ppi) return false;
      if (pgt < o.pgt) return true;
      if (o.pgt < pgt) return false;
      return prefer_comp_on_real_element < o.prefer_comp_on_real_element;
    }
    bool equal(const static_stored_object_key &oo) const override {
      auto &o = dynamic_cast<const emelem_comp_key_ &>(oo);
      if (pmt == o.pmt && ppi == o.ppi && pgt == o.pgt) return true;

      auto pmat_key = dal::key_of_stored_object(pmt);
      auto poo_mat_key = dal::key_of_stored_object(o.pmt);
      if (*pmat_key != *poo_mat_key) return false;

      auto pint_key = dal::key_of_stored_object(ppi);
      auto poo_int_key = dal::key_of_stored_object(o.ppi);
      if (*pint_key != *poo_int_key) return false;

      auto pgt_key = dal::key_of_stored_object(pgt);
      auto poo_gt_key = dal::key_of_stored_object(o.pgt);
      if (*pgt_key != *poo_gt_key) return false;

      return true;
    }
    emelem_comp_key_(pmat_elem_type pm, pintegration_method pi,
                     bgeot::pgeometric_trans pg, bool on_relt)
    { pmt = pm; ppi = pi; pgt = pg; prefer_comp_on_real_element = on_relt; }
    emelem_comp_key_(void) { }
  };
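  /* emelem_comp_structure_ holds everything needed to evaluate the
     elementary tensors for one (mat_elem_type, integration method,
     geometric transformation) triple: precomputations of the basis
     functions, the lists of indices to reduce, and, for linear
     transformations, the cached reference tensors (mref). */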
  struct emelem_comp_structure_ : public mat_elem_computation {
    bgeot::pgeotrans_precomp pgp;
    ppoly_integration ppi;
    papprox_integration pai;
    bool is_ppi;
    mutable std::vector<base_tensor> mref;
    mutable std::vector<pfem_precomp> pfp;
    mutable std::vector<base_tensor> elmt_stored;
    short_type nbf, dim;
    std::deque<short_type> grad_reduction, hess_reduction, trans_reduction;
    std::deque<short_type> K_reduction;
    std::deque<pfem> trans_reduction_pfi;
    mutable base_vector un, up;
    mutable bool faces_computed;
    mutable bool volume_computed;
    bool is_linear;
    bool computed_on_real_element;
    size_type memsize() const {
      size_type sz = sizeof(emelem_comp_structure_) +
        mref.capacity()*sizeof(base_tensor) +
        trans_reduction_pfi.size()*sizeof(pfem);
      for (size_type i=0; i < mref.size(); ++i) sz += mref[i].memsize();
      return sz;
    }
    emelem_comp_structure_(pmat_elem_type pm, pintegration_method pi,
                           bgeot::pgeometric_trans pg,
                           bool prefer_comp_on_real_element) {
      pgt = pg;
      pgp = bgeot::geotrans_precomp(pg, pi->pintegration_points(), pi);
      pme = pm;
      switch (pi->type()) {
      case IM_EXACT:
        ppi = pi->exact_method(); pai = 0;  is_ppi = true;  break;
      case IM_APPROX:
        ppi = 0; pai = pi->approx_method(); is_ppi = false; break;
      case IM_NONE:
        GMM_ASSERT1(false, "Attempt to use IM_NONE integration method");
      }

      faces_computed = volume_computed = false;
      is_linear = pgt->is_linear();
      computed_on_real_element = !is_linear ||
        (prefer_comp_on_real_element && !is_ppi);
      nbf = pgt->structure()->nb_faces();
      dim = pgt->structure()->dim();
      mat_elem_type::const_iterator it = pme->begin(), ite = pme->end();

      for (short_type k = 0; it != ite; ++it, ++k) {
        if ((*it).pfi) {
          if ((*it).pfi->is_on_real_element()) computed_on_real_element = true;
          GMM_ASSERT1(!is_ppi || (((*it).pfi->is_polynomial()) && is_linear
                                  && !computed_on_real_element),
                      "Exact integration not allowed in this context");

          if ((*it).t != GETFEM_NONLINEAR_ && !((*it).pfi->is_equivalent())) {
            trans_reduction.push_back(k);
            trans_reduction_pfi.push_back((*it).pfi);
          }
        }
        switch ((*it).t) {
        case GETFEM_BASE_ :
          if ((*it).pfi->target_dim() > 1) {
            switch((*it).pfi->vectorial_type()) {
            case virtual_fem::VECTORIAL_PRIMAL_TYPE:
              K_reduction.push_back(k); break;
            case virtual_fem::VECTORIAL_DUAL_TYPE:
              grad_reduction.push_back(k); break;
            default: break;
            }
          }
          break;
        case GETFEM_UNIT_NORMAL_ :
          computed_on_real_element = true; break;
        case GETFEM_GRAD_GEOTRANS_ :
        case GETFEM_GRAD_GEOTRANS_INV_ :
          ++k; computed_on_real_element = true; break;
        case GETFEM_GRAD_ : {
          switch((*it).pfi->vectorial_type()) {
          case virtual_fem::VECTORIAL_PRIMAL_TYPE:
            K_reduction.push_back(k); break;
          case virtual_fem::VECTORIAL_DUAL_TYPE:
            grad_reduction.push_back(k); break;
          default: break;
          }
          if ((*it).pfi->target_dim() > 1) ++k;
          if (!((*it).pfi->is_on_real_element()))
            grad_reduction.push_back(k);
        } break;
        case GETFEM_HESSIAN_ : {
          switch((*it).pfi->vectorial_type()) {
          case virtual_fem::VECTORIAL_PRIMAL_TYPE:
            K_reduction.push_back(k); break;
          case virtual_fem::VECTORIAL_DUAL_TYPE:
            grad_reduction.push_back(k); break;
          default: break;
          }
          if ((*it).pfi->target_dim() > 1) ++k;
          if (!((*it).pfi->is_on_real_element()))
            hess_reduction.push_back(k);
        } break;
        case GETFEM_NONLINEAR_ : {
          if ((*it).nl_part == 0) {
            GMM_ASSERT1(!is_ppi, "For nonlinear terms you have "
                        "to use approximated integration");
            computed_on_real_element = true;
          }
        } break;
        }
      }
      pfp.resize(pme->size());
      it = pme->begin(), ite = pme->end();
      for (size_type k = 0; it != ite; ++it, ++k)
        pfp[k] = fem_precomp((*it).pfi, pai->pintegration_points(), pi);
      elmt_stored.resize(pme->size());

      if (!computed_on_real_element) mref.resize(nbf + 1);
    }
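    /* add_elem evaluates every term of the mat_elem_type at the current
       integration point (base values, gradients, hessians, unit normal,
       geotrans gradients, nonlinear terms), stores the results in
       elmt_stored and accumulates their tensor product into t, weighted by
       the Jacobian times the integration coefficient. */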
    void add_elem(base_tensor &t, fem_interpolation_context& ctx,
                  scalar_type J, bool first, bool trans,
                  mat_elem_integration_callback *icb,
                  bgeot::multi_index sizes) const {
      mat_elem_type::const_iterator it = pme->begin(), ite = pme->end();
      bgeot::multi_index aux_ind;

      // reset the "already computed" marker of every nonlinear term
      for (size_type k = 0; it != ite; ++it, ++k) {
        if ((*it).t == GETFEM_NONLINEAR_)
          (*it).nlt->term_num() = size_type(-1);
      }
      it = pme->begin();
      bgeot::multi_index::iterator mit = sizes.begin();
      for (size_type k = 0; it != ite; ++it, ++k, ++mit) {
        if (pfp[k]) ctx.set_pfp(pfp[k]);
        switch ((*it).t) {
        case GETFEM_BASE_ :
          if ((*it).pfi && (*it).pfi->target_dim() > 1) ++mit;
          if (trans || (*it).pfi->is_on_real_element())
            (*it).pfi->real_base_value(ctx, elmt_stored[k], icb != 0);
          else
            elmt_stored[k] = pfp[k]->val(ctx.ii());
          break;
        case GETFEM_GRAD_ :
          if ((*it).pfi && (*it).pfi->target_dim() > 1) ++mit;
          if (trans || (*it).pfi->is_on_real_element())
            (*it).pfi->real_grad_base_value(ctx, elmt_stored[k], icb != 0);
          else
            elmt_stored[k] = pfp[k]->grad(ctx.ii());
          break;
        case GETFEM_HESSIAN_ :
          if ((*it).pfi && (*it).pfi->target_dim() > 1) ++mit;
          if (trans || (*it).pfi->is_on_real_element())
            (*it).pfi->real_hess_base_value(ctx, elmt_stored[k], icb != 0);
          else {
            base_tensor tt = pfp[k]->hess(ctx.ii());
            aux_ind.resize(3);
            aux_ind[2] = gmm::sqr(tt.sizes()[2]); aux_ind[1] = tt.sizes()[1];
            aux_ind[0] = tt.sizes()[0];
            tt.adjust_sizes(aux_ind);
            elmt_stored[k] = tt;
          }
          break;
        case GETFEM_UNIT_NORMAL_ :
          aux_ind.resize(1); aux_ind[0] = short_type(ctx.N());
          elmt_stored[k].adjust_sizes(aux_ind);
          std::copy(up.begin(), up.end(), elmt_stored[k].begin());
          break;
        case GETFEM_GRAD_GEOTRANS_ :
        case GETFEM_GRAD_GEOTRANS_INV_ : {
          size_type P = gmm::mat_ncols(ctx.K()), N = ctx.N();
          base_matrix Bt;
          if (it->t == GETFEM_GRAD_GEOTRANS_INV_) {
            Bt.resize(P,N); gmm::copy(gmm::transposed(ctx.B()),Bt);
          }
          const base_matrix &A = (it->t==GETFEM_GRAD_GEOTRANS_) ? ctx.K():Bt;
          aux_ind.resize(2);
          *mit++ = aux_ind[0] = short_type(gmm::mat_nrows(A));
          *mit = aux_ind[1] = short_type(gmm::mat_ncols(A));
          elmt_stored[k].adjust_sizes(aux_ind);
          std::copy(A.begin(), A.end(), elmt_stored[k].begin());
        } break;
        case GETFEM_NONLINEAR_ :
          if ((*it).nl_part != 0) {
            // auxiliary part of a nonlinear term: only "prepare" is called
            if ((*it).nlt->term_num() == size_type(-1)) {
              (*it).nlt->prepare(ctx, (*it).nl_part);
            }
            aux_ind.resize(1); aux_ind[0] = 1;
            elmt_stored[k].adjust_sizes(aux_ind); elmt_stored[k][0] = 1.;
          } else {
            if ((*it).nlt->term_num() == size_type(-1)) {
              const bgeot::multi_index &nltsizes
                = (*it).nlt->sizes(ctx.convex_num());
              elmt_stored[k].adjust_sizes(nltsizes);
              (*it).nlt->compute(ctx, elmt_stored[k]);
              (*it).nlt->term_num() = k;
              for (dim_type ii = 0; ii < nltsizes.size(); ++ii)
                *mit++ = nltsizes[ii];
              --mit; // compensate for the ++mit of the enclosing loop
            } else {
              elmt_stored[k] = elmt_stored[(*it).nlt->term_num()];
              const bgeot::multi_index &nltsizes = elmt_stored[k].sizes();
              for (dim_type ii = 0; ii < nltsizes.size(); ++ii)
                *mit++ = nltsizes[ii];
              --mit;
            }
          }
          break;
        }
      }
      GMM_ASSERT1(mit == sizes.end(), "internal error");

      scalar_type c = J*pai->coeff(ctx.ii());
      if (!icb) {
        if (first) { t.adjust_sizes(sizes); }
        expand_product_daxpy(t, c, first);
      } else {
        icb->eltm.resize(0); // hand the individual tensors to the callback
        for (unsigned k=0; k != pme->size(); ++k) {
          if (icb && !((*pme)[k].t == GETFEM_NONLINEAR_
                       && (*pme)[k].nl_part != 0))
            icb->eltm.push_back(&elmt_stored[k]);
        }
        icb->exec(t, first, c);
      }
    }
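    /* Plain nested-loop expansion of the product of the stored elementary
       tensors into t. Kept for reference; add_elem uses the BLAS-based
       expand_product_daxpy below. */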
    void expand_product_old(base_tensor &t, scalar_type J, bool first) const {
      scalar_type V;
      size_type k;
      if (first) std::fill(t.begin(), t.end(), 0.0);
      base_tensor::iterator pt = t.begin();
      std::vector<base_tensor::const_iterator> pts(pme->size());
      std::vector<scalar_type> Vtab(pme->size());
      for (k = 0; k < pme->size(); ++k)
        pts[k] = elmt_stored[k].begin();

      size_type k0 = 0;
      unsigned n0 = unsigned(elmt_stored[0].size());
      base_tensor::const_iterator pts0 = pts[k0];

      k = pme->size()-1; Vtab[k] = J;
      do {
        for (V = Vtab[k]; k!=k0; --k)
          Vtab[k-1] = V = *pts[k] * V;
        for (k=0; k < n0; ++k)
          *pt++ += V * pts0[k];
        for (k=k0+1; k != pme->size() && ++pts[k] == elmt_stored[k].end(); ++k)
          pts[k] = elmt_stored[k].begin();
      } while (k != pme->size());
      GMM_ASSERT1(pt == t.end(), "Internal error");
    }
    /* Same tensor-product accumulation, but the innermost loop is delegated
       to the BLAS daxpy routine; factors of size one are folded into the
       scalar weight beforehand. */
    void expand_product_daxpy(base_tensor &t, scalar_type J, bool first) const {
      size_type k, nm;
      scalar_type V;
      base_tensor::iterator pt = t.begin();
      THREAD_SAFE_STATIC std::vector<base_tensor::const_iterator> pts;
      THREAD_SAFE_STATIC std::vector<base_tensor::const_iterator> es_beg;
      THREAD_SAFE_STATIC std::vector<base_tensor::const_iterator> es_end;
      THREAD_SAFE_STATIC std::vector<scalar_type> Vtab;

      pts.resize(0); pts.resize(pme->size());
      es_beg.resize(0); es_beg.resize(pme->size());
      es_end.resize(0); es_end.resize(pme->size());
      Vtab.resize(pme->size());

      if (first) memset(&(*t.begin()), 0, t.size()*sizeof(*t.begin()));
      // collect the factors with more than one entry; fold the others into J
      for (k = 0, nm = 0; k < pme->size(); ++k) {
        if (elmt_stored[k].size() != 1) {
          es_beg[nm] = elmt_stored[k].begin();
          es_end[nm] = elmt_stored[k].end();
          pts[nm] = elmt_stored[k].begin();
          ++nm;
        } else J *= elmt_stored[k][0];
      }
      if (nm == 0) {
        t[0] += J;
      } else {
        BLAS_INT n0 = BLAS_INT(es_end[0] - es_beg[0]);
        base_tensor::const_iterator pts0 = pts[0];

        k = nm-1; Vtab[k] = J;
        BLAS_INT one = BLAS_INT(1);
        do {
          for (V = Vtab[k]; k; --k)
            Vtab[k-1] = V = *pts[k] * V;
          GMM_ASSERT1(pt+n0 <= t.end(), "Internal error");
          gmm::daxpy_(&n0, &V, const_cast<double*>(&(pts0[0])), &one,
                      (double*)&(*pt), &one);
          pt += n0;
          for (k=1; k != nm && ++pts[k] == es_end[k]; ++k)
            pts[k] = es_beg[k];
        } while (k != nm);
        GMM_ASSERT1(pt == t.end(), "Internal error");
      }
    }
    /* For a linear geometric transformation the elementary tensors on the
       reference element (volume and each face) do not depend on the real
       element, so they are computed once here and cached in mref. */
    void pre_tensors_for_linear_trans(bool volumic) const {

      if ((volumic && volume_computed) || (!volumic && faces_computed)) return;

      bgeot::multi_index sizes = pme->sizes(0), mi(sizes.size());
      bgeot::multi_index::iterator mit = sizes.begin(), mite = sizes.end();
      size_type f = 1;
      for ( ; mit != mite; ++mit, ++f) f *= *mit;
      if (f > 1000000) // warn when the elementary tensor becomes very large
        GMM_WARNING2("Warning, very large elementary computations.\n"
                     << "Be sure you need to compute this elementary matrix.\n"
                     << "(sizes = " << sizes << " )\n");

      base_tensor aux(sizes);
      std::fill(aux.begin(), aux.end(), 0.0);
      if (volumic) {
        volume_computed = true;
        mref[0] = aux;
      } else {
        faces_computed = true;
        std::fill(mref.begin()+1, mref.end(), aux);
      }
      if (is_ppi) { // exact (polynomial) integration
        base_poly P(dim, 0), Q(dim, 0), R(dim, 0);
        size_type old_ind = size_type(-1), ind;
        for ( ; !mi.finished(sizes); mi.incrementation(sizes)) {

          mat_elem_type::const_iterator it = pme->begin(), ite = pme->end();
          mit = mi.begin();
          ind = *mit; ++mit;

          if ((*it).pfi->target_dim() > 1)
            { ind += (*it).pfi->nb_base(0) * (*mit); ++mit; }
          Q = ((ppolyfem)((*it).pfi).get())->base()[ind];

          switch ((*it).t) {
          case GETFEM_GRAD_ : Q.derivative(short_type(*mit)); ++mit; break;
          case GETFEM_HESSIAN_ :
            Q.derivative(short_type(*mit % dim));
            Q.derivative(short_type(*mit / dim)); ++mit; break;
          case GETFEM_BASE_ : break;
          case GETFEM_GRAD_GEOTRANS_:
          case GETFEM_GRAD_GEOTRANS_INV_:
          case GETFEM_UNIT_NORMAL_ :
          case GETFEM_NONLINEAR_ :
            GMM_ASSERT1(false,
                        "Normals, gradients of geotrans and non linear "
                        "terms are not compatible with exact integration, "
                        "use an approximate method instead");
          }
          ++it;
          // accumulate the product of the remaining factors in P
          if (it != ite && *mit != old_ind) {
            old_ind = *mit;
            P.one();
            for (; it != ite; ++it) {
              ind = *mit; ++mit;

              if ((*it).pfi->target_dim() > 1)
                { ind += (*it).pfi->nb_base(0) * (*mit); ++mit; }
              R = ((ppolyfem)((*it).pfi).get())->base()[ind];

              switch ((*it).t) {
              case GETFEM_GRAD_ : R.derivative(short_type(*mit)); ++mit; break;
              case GETFEM_HESSIAN_ :
                R.derivative(short_type(*mit % dim));
                R.derivative(short_type(*mit / dim)); ++mit; break;
              case GETFEM_BASE_ : break;
              case GETFEM_UNIT_NORMAL_ :
              case GETFEM_GRAD_GEOTRANS_:
              case GETFEM_GRAD_GEOTRANS_INV_ :
              case GETFEM_NONLINEAR_ :
                GMM_ASSERT1(false, "No nonlinear term allowed here");
              }
              P *= R;
            }
          }
          R = P * Q;
          if (volumic) mref[0](mi) = bgeot::to_scalar(ppi->int_poly(R));
          for (f = 0; f < nbf && !volumic; ++f)
            mref[f+1](mi) = bgeot::to_scalar(ppi->int_poly_on_face(R, short_type(f)));
        }
      } else { // approximate integration: loop over the integration points
        bool first = true;
        fem_interpolation_context ctx;
        size_type ind_l = 0, nb_ptc = pai->nb_points_on_convex(),
          nb_pt_l = nb_ptc, nb_pt_tot = (volumic ? nb_ptc : pai->nb_points());
        for (size_type ip = (volumic ? 0 : nb_ptc); ip < nb_pt_tot; ++ip) {
          while (ip == nb_pt_l && ind_l < nbf)
            { nb_pt_l += pai->nb_points_on_face(short_type(ind_l)); ind_l++; }
          ctx.set_ii(ip);
          add_elem(mref[ind_l], ctx, 1.0, first, false, NULL, sizes);
          first = false;
        }
      }
    }
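    /* compute() builds the elementary tensor t on the real element whose
       geometric nodes are the columns of G; ir == 0 is the volume term,
       ir > 0 the face ir-1. With a linear transformation the cached
       reference tensors are simply scaled and reduced, otherwise the
       integration is carried out point by point on the real element. */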
    void compute(base_tensor &t, const base_matrix &G, short_type ir,
                 size_type elt, mat_elem_integration_callback *icb = 0) const {
      dim_type P = dim_type(dim), N = dim_type(G.nrows());
      short_type NP = short_type(pgt->structure()->nb_points());
      fem_interpolation_context ctx(pgp, 0, 0, G, elt, short_type(ir-1));

      GMM_ASSERT1(G.ncols() == NP, "dimensions mismatch");
      if (ir > 0) {
        up.resize(N); un.resize(P);
        gmm::copy(pgt->normals()[ir-1], un);
      }

      base_tensor taux;
      bool flag = false;
      if (!computed_on_real_element) {
        pre_tensors_for_linear_trans(ir == 0);
        const base_matrix& B = ctx.B(); // also computes the Jacobian
        scalar_type J = ctx.J();
        if (ir > 0) {
          gmm::mult(B, un, up);
          scalar_type nup = gmm::vect_norm2(up);
          J *= nup; gmm::scale(up, 1.0/nup);
        }

        t = mref[ir]; gmm::scale(t.as_vector(), J);
        if (grad_reduction.size() > 0) {
          std::deque<short_type>::const_iterator it = grad_reduction.begin(),
            ite = grad_reduction.end();
          for ( ; it != ite; ++it) {
            (flag ? t:taux).mat_transp_reduction(flag ? taux:t, B, *it);
            flag = !flag;
          }
        }

        if (K_reduction.size() > 0) {
          std::deque<short_type>::const_iterator it = K_reduction.begin(),
            ite = K_reduction.end();
          for ( ; it != ite; ++it) {
            (flag ? t:taux).mat_transp_reduction(flag ? taux:t, ctx.K(), *it);
            flag = !flag;
          }
        }

        if (hess_reduction.size() > 0) {
          std::deque<short_type>::const_iterator it = hess_reduction.begin(),
            ite = hess_reduction.end();
          for ( ; it != ite; ++it) {
            (flag ? t:taux).mat_transp_reduction(flag ? taux:t, ctx.B3(), *it);
            flag = !flag;
          }
        }
      } else { // nonlinear transformation or FEM defined on the real element
        bgeot::multi_index sizes = pme->sizes(elt);
        bool first = true;

        for (size_type ip = (ir == 0) ? 0 : pai->repart()[ir-1];
             ip < pai->repart()[ir]; ++ip, first = false) {
          ctx.set_ii(ip);
          const base_matrix& B = ctx.B();
          scalar_type J = ctx.J();
          if (ir > 0) {
            gmm::mult(B, un, up);
            scalar_type nup = gmm::vect_norm2(up);
            J *= nup; gmm::scale(up, 1.0/nup);
          }
          add_elem(t, ctx, J, first, true, icb, sizes);
        }
        // all points may lie outside the element (e.g. level-set integration)
        if (first) {
          GMM_WARNING3("No integration point on this element. "
                       "Caution, returning a null tensor");
          t.adjust_sizes(sizes); gmm::clear(t.as_vector());
        }
      }
      if (trans_reduction.size() > 0 && !icb) {
        std::deque<short_type>::const_iterator it = trans_reduction.begin(),
          ite = trans_reduction.end();
        std::deque<pfem>::const_iterator iti = trans_reduction_pfi.begin();
        for ( ; it != ite; ++it, ++iti) {
          ctx.set_pf(*iti);
          (flag ? t:taux).mat_transp_reduction(flag ? taux:t, ctx.M(), *it);
          flag = !flag;
        }
      }
      if (flag) t = taux;
    }
    void compute(base_tensor &t, const base_matrix &G, size_type elt,
                 mat_elem_integration_callback *icb) const
    { compute(t, G, 0, elt, icb); }

    void compute_on_face(base_tensor &t, const base_matrix &G,
                         short_type f, size_type elt,
                         mat_elem_integration_callback *icb) const
    { compute(t, G, short_type(f+1), elt, icb); }
  };
  pmat_elem_computation mat_elem(pmat_elem_type pm, pintegration_method pi,
                                 bgeot::pgeometric_trans pg,
                                 bool prefer_comp_on_real_element) {
    dal::pstatic_stored_object_key
      pk = std::make_shared<emelem_comp_key_>(pm, pi, pg,
                                              prefer_comp_on_real_element);
    dal::pstatic_stored_object o = dal::search_stored_object(pk);
    if (o) return std::dynamic_pointer_cast<const mat_elem_computation>(o);
    pmat_elem_computation
      p = std::make_shared<emelem_comp_structure_>
      (pm, pi, pg, prefer_comp_on_real_element);
    dal::add_stored_object(pk, p); // cache the computation for reuse
    return p;
  }
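  /* Minimal usage sketch (pme_, pim_, pgt_, G, cv and f are hypothetical
     names for an already built mat_elem_type, integration method, geometric
     transformation, matrix of geometric nodes, element number and face):

       getfem::pmat_elem_computation pmec =
         getfem::mat_elem(pme_, pim_, pgt_, false);
       getfem::base_tensor t;
       pmec->compute(t, G, cv, 0);              // volume term on element cv
       pmec->compute_on_face(t, G, f, cv, 0);   // same term on face f

     Repeated calls with the same arguments return the same cached
     emelem_comp_structure_ through the dal stored-object mechanism. */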