~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii RPMS.2017/deal_II-devel-9.6.1-1.1.x86_64.rpm RPMS/deal_II-devel-9.6.1-1.1.x86_64.rpm differ: byte 225, line 1 Comparing deal_II-devel-9.6.1-1.1.x86_64.rpm to deal_II-devel-9.6.1-1.1.x86_64.rpm comparing the rpm tags of deal_II-devel --- old-rpm-tags +++ new-rpm-tags @@ -10399 +10399 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 6c0f76d55e8683bad0a11859238b02c00339fd02532f16ebcdc9bdc84ee454ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html a66882cc70744f73c3bc61cff941fea136f961061cb78b8b74d26be5f822a171 2 @@ -10402,3 +10402,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html dce5944c26deda6366b011a73f34be67edfb41b74074efb19620787492bfb831 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex f6ad8d68b8aa18872fbb1b134c14137af92f64944f5242889a2363488079da83 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 81f2ce02eea5cd0ec9e73290e434e6b02fe3557dac7f93b24a269cba22ca1158 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html b9e6774e374b160e14775de73b7b91fcad3d6f259d1cec7b4ca41ec4c38d4b79 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex d722b22d5a54577814bdd651d30cceef9cd67db0e185f00ce1218f83ea6a1d9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 7e5c7e9d7084e8fc3099069ee5b7e4dd3e71586a3a22d09d529d5939158fd043 2 @@ -10574 +10574 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 38978714de80f462581976eae50d8b44c9d6614ce19e0de7281e78332c125aeb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 75e08e2764e235da58c01abc019bdd97fd727e6fe60c49722dcfc6ebc8af44ea 2 @@ -10576 +10576 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 4d622e89675d3cdfec0dc96b0ee15778100b116e9b9367961cc55e95f7221203 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 
a3b38240f60e3b4e562b7f50c1c5e931b24a3201a087370695c7ea38b00a9f07 2 @@ -10584 +10584 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 9c806479efeb5a863da8c49ce7481c48a11562b74bf8382e00db27958c399b20 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2847317e74ccf84c91c54341c01f8eb6691b48738011ed4a2959d21ea665df08 2 @@ -10586 +10586 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 28769bff50556ecc25b001a4827523e6d3abc324f70969f49d0f7dd619cfdfe7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 7a041f638fdcb31f8eb89394493ff55a8010e5ce512aa8d59f2d03070be33c1e 2 @@ -10595 +10595 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 44f025c0ef4293f5acda028700a53318abc9499e8ec0071b2418fd5f7473fb92 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 83ca4e1fa686de89169df3ff452a3c1a31175e4d4d3cfa417905bb2473490f3b 2 @@ -10623 +10623 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 54ed43a9f8b0ce831a3cf199774fcde5dabf75fd32fd359e41cb4bb7088ba5c5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html c1a00513490b7d44e1d1bbf5307eff296fcb37f4f4d9a14e8e93263f89f3f04f 2 @@ -10641 +10641 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html b58e32c9bb60a40c6b2342c4305d6ff136f9a8a96d77d0710a1d8ab05bd2d11d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 6ed9ebb45c3a8708f0203fc5c204650ee77ac3b5a36975379fcd6a1c00ba581d 2 @@ -10658 +10658 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 3a24752f64486b94ab21d87f589fe05a5c52ca0d2f5713075bf6c44c16e83845 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 74f51c11e3fa3358e713581e352b442701f922f4986cf82f5199463b0f11d357 2 @@ -10695 +10695 
@@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 45e156ee819fd859a953ff0c76749557d6d81afbe14eb089182b5dff49205b30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 334e9f5f360d7b382d0ffd47695a6fcbbd4dad0621a7779ee14b2b015c6bd64b 2 @@ -10698 +10698 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html da5d4d4020c2feefc6b76a49bfe843c8ce8cf8eb316ccba1a439d593849bb1af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 19509e503c036b5ea692eb1f56ad22cc5ac814d23e4459a0f9600f177ffc4f03 2 @@ -10701 +10701 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 450cc0b760922081823bc305d868a7298286a529678d951a9139e0e7ff45e12d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 1f797a955609c76f8ce0115602ad75bb215aa72c19267539de390d9236e59e7c 2 @@ -10704 +10704 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 3262e214625b669e17461cf7b3551cb2ef23dd03615f2223c6afef55ceb025fa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html a0fd628723c4b2d23d7183cebf0ddc4a44e8b5baa4fd39a6d8ec032e943e9df6 2 @@ -10711 +10711 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 96c726047b607e1c9f85f4c2b1647514f15a692298f81d7813dfbfcc1b160ed5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 815e9014ba747581c0ad9e6739727c3d023cb0b28bcb8b12a66ef9edc2af9016 2 @@ -10717 +10717 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 096807c6824cff59376679b8cba8e9e2d3eaf14e59d2fd3ceb1d7a1f7d0e9144 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2b5f3835ef237705a0b21a7088788c545780513119dccfb0d5045fac919f4772 2 @@ -10723 +10723 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html a4424dad4240b46ba63f11d5b80ed47c35952049b5e5e98d93c0a856ec01db33 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html f23be64ee21d65e88f1d4f428e4bc8bf5222f8ec669116db0ed4811a7768644f 2 @@ -10728 +10728 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2b029ff2f540878a2a13402f922a6192d4d9ae99270fa0ed5739f3237f3ca439 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 4976df2975d7fe95abd402f741e6959d910c02efd9704c1ed91d161a82bb4fb9 2 @@ -10741 +10741 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 849ec17c89876faf687ac03060f1b3cade1c8af1d13f3b790f3e8ce08aee8cea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 342834c865be032ef8ed33b669b006d97c2f46932d379760476ff0766633179b 2 @@ -10743 +10743 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html a859eac6acff3b2df89433c9605eb38fc9d721c962e635df78c421705c7ed94b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html de8d02d15364c60d741e0d067fb410f72ac982b692f34a67a6d9cf8ae326b541 2 @@ -10753 +10753 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2a9125efe692526f7fc07e4ca66c17f88ddd0977b6e8d594569b17114b8b6fde 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html cf74f277c6dc501a9b3524e668e65ed0fe7690141d2282dff6ba7004d516b572 2 @@ -10755 +10755 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 63794e1b3cf198770762628491cc5bfa98e729a5e06e73958248094d477c6922 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html d356a6b0c9fba0406abcf9b0eeb743479d653942e1bb0c05f39d714191ca0815 2 @@ -10759 +10759 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 905c7b123a00735a38b603ef318b777d7df283d208a2b37ad51573060a86367f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html e0e3a863405a6277ecb6cb8786e21744563e600008d10a87bab57b02ba46e3c9 2 @@ -10773 +10773 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html cde5c4418c38ca6f476002b62f7285a3bbc2985dc5efc0fdc414b96d4584ccf4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 8d645e95830cade47c99fa54951805288034608e9cd143012b135c59b1fd74ea 2 @@ -10786 +10786 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html d8d53518bbd4ad0f9a396023734d0255a62cdb07e557a4ce5e4bacbeed95eb15 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 1036f64fa0033626c300a9dd8b40443a8257b5947edda5119adc0e18b646d995 2 @@ -10789 +10789 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 97ed420d8cd992ec5c5bd3c41250110a3ae1b649e6663a84c086ccfca9343322 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 5e38b2e19854a0cd41f0617fb418bd1ab55b72ae9e0574ddb7b7e4040408a44d 2 @@ -10805 +10805 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 6c0586f5ecc691daafb4be1e3e4e92e20a519004de5efec717e2a93da620fa57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 5e997cd1b8d97f6f3e4f1e9ab9c9cf7088feacb28829f7aa7483fe2d124c04d3 2 @@ -10818 +10818 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html e004b7780e5548a1887a72486796f7bcd8c657773fe0ba1d14cdcb3b5243555a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 368f981f8700e028fafbc80228ce49d2e628189f8a9caed0400373302decd8b9 2 @@ -10832 +10832 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html f9ee6bb19c5d14841fb1170f07aa9287eb00af10370c8d66a5c43ab9ca39af28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html ff9b87dbf1bb59d55a9421b555bd4188f81b3d174fb50507207d007f683a6b03 2 @@ -10861 +10861 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 
7434fe63747ab85af53b935ea15f42ca29015ed764eaf6a5ec2cd80066dd5b0c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 4db1d55bf0f12eb14f83c972d45364844e0adee1542783e5dc66a37c68b54e95 2 @@ -10866 +10866 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 727fb7d5b30802a2551e046fbb9bfc45323e0e2321dd48bfbb479749fd2bd72a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 97aa3e2b044912c311da7ae96f190f0ba789945a229ff8e663a738bb0b7937a4 2 @@ -10869 +10869 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 6e003bb2e0d7e1315b9e3b86695f864d8557dd0810fc12abca81ee32e95923a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2fb962d1278dd6a5209be7bf16ea12cb39601a65635415019410e15a15a374f6 2 @@ -10886 +10886 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 85bf5f6d20711361a80b5efdaa93fbaed843dcc3f8080874070b20da6faec0c2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 6275f226fed2651f26ce4cf6b27900924d5b0827bae4f3e11c5c3273bfece1ab 2 @@ -10888 +10888 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 763bf2685572414cd1454575f3d6715d9261956a3fdb6dcfee58de3e827f8c9e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 8634539e7883a620ef53d361019a542c46f01c57b44b731b50058219d95e7cd0 2 @@ -10890 +10890 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html e40e94735669ce41a6706a7357f43e35902c5e7eacaacd200f2d904cd673456b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html c5199edd16288aab88677c79900d3c5a4e76c0f2ebc4a0ac460d88dee333cc0d 2 @@ -10899 +10899 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2091d5b243ab05c29b8a9c6801a4700013d3097305f00e7f5abd787d66abf82e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 96657c50cb3c992bddfc794c2186c74b1f0c30a45b43f4cfb9edd2251c6fc9ff 2 @@ -10926 +10926 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 527f9bbb7f77430523520776af87bc7cef4418fb0ac79d96fb353c856d62975c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 0d3f7321e098c2304e018a0f2db9c5f1fea505f58e0d0edf24c25ead9782969c 2 @@ -10937 +10937 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 3bae878e9ab1bcdec3f4dcdc5063b86e7304550261b23660112343e21ec48377 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 08b33c851f148e387a60c65a9afb6daf44602098654ba87572d95ca9957a3ba9 2 @@ -10948 +10948 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 19f88e160e970b06673623d8873c08d385e7b3a8374cbc109efe01685f0a9341 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 6930334e8331571548b670f88b07dde96e2eec151a45503322371b13d4246740 2 @@ -10955 +10955 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html da9d76fd48f2ec8d7b35de79c4fa47032a99543482b57b391ca7a28d9d034be5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2c9a1b6e6bc6edd97b401a07bc912340ff49c906d6e0575f8ce27af8e90d6e8a 2 @@ -10958 +10958 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html efbc3f4cac7970b21d13069ac2d3c3d1423dab718defc88ee17edc78e8582c56 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 93d447e637be6e64e1a6168d52ca3b710b49550e660e696ce9699649d9132944 2 @@ -10961 +10961 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html da61e22097118c56b5efaa37bcfc256a211fe34159179ab3fd9475e1004111f0 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html aec83629198d0c0797be32e654f1cafbadedddd57c91cd280d2ebd14bdb12fa0 2 @@ -10969 +10969 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 965f26979470b7601ec6aa764be0232895dcaeb59fbfdaede923319b841e8e61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 20b6830feaf13dd778241f811893ed1030bd9cf4a3f1b8c548b5f3d445345970 2 @@ -10971 +10971 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 840846155c6ae69bdda4879d85ea73017df9d2b2680c73a1a31571a5275e117a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 01bd5cf19d1d67f582609d4138070fc804b55e31aa6c18266d18a7679803c1b9 2 @@ -10973 +10973 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html cb8777f44d3ea52e799253bcee1ecec84b51f9e950cede6832032572c9736f29 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 991bfbd3d14f2d1644521cbeddf58b22d31c0a1711bb1cddebaba09764eab486 2 @@ -10976 +10976 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 331e3256576e38bcb6c3f5445dd5ddb746b856e3daed601805f97fc06d86415b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 1e174ead3e132af3cbb3694442523875e478452497bc36bd263f5a58da824c6a 2 @@ -10980 +10980 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html ff3d9eda0c25d3b9cdacd408ac0d28d095b027cbfe474a778d034ff74da3bd9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 29b1b51224ed4a0ae23c1e44d47aed21dbb51e00912466b28fe42b6269e9d450 2 @@ -10983 +10983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html ba8d8b1d952efb57dd8b27a3dfd93046cbdc274674468d83b6b885d2a549a464 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html b5137e2bf9f8d7e9beb62bce69941a6ae3d8f62dc80e667dc8a9d808d948f93e 2 @@ -10986 +10986 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 90c1642f1412d9ab263d191d7d3ef3eb4ca1ea63e01865ffb46e0819fb8598ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 1469ccd0051a5c4b81dd81296ab67465ddcbc8071382d80b0ed6da11205a0075 2 @@ -10988 +10988 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 80fc95c39ade49a0f1621cc071bfaf888b439981332d02f99a79af440f75bd96 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html c2df5b553679dd30d50502775d321f5088c85fb3c5184e4eb798651f606158a5 2 @@ -10992 +10992 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 6934676a658e748febb8284716bd96dff5c1b1b6b1d52dd5ae28b4cecea6469a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 7138f14a3c64cad899c0f87b355e0ff1ea39fe2c205f8d89975cd58426c25850 2 @@ -10998 +10998 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 838c075c6bbb4a2e02568c063d6a644caabd298aaf8c757d43647fdc6f76889e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 3649727255a2e16eabf97644a5a11f1e1465bb8c8f47261b067e1819c91f87e2 2 @@ -11001 +11001 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html b37193c1721afc48250b03707f67a5cdd7f3589c605ced89a2895145cd3d65b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 78bf52bcdaf51daa35bca3994e250515193dec9c2540bfc5dbe30f3e9cb0aefb 2 @@ -11004 +11004 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html ca857b6a94229502417a351b9d6ac20f1d3ba8eeaa2319bebab1b47a6a618b36 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html 6e5c11d281252d829ab29f3ae569ac60b413c9abd1f4ddcc5f47250c98a4f66a 2 @@ -11006 +11006 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html 
bfbebe4341cf28e72434413d07af0547d4ca8aadb2fb6f924b9878ed89c57f57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html 3c18f233ef46c5139d6578f74accababbb839deab1b72e91a762323169fb2077 2 @@ -11017 +11017 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2d7331780ccbb39d7fd1dccbd240db54f82a7434f08ea76cb752551915eaad77 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html dba553497f90567dd46795e3c7656b8872876bdc3d31a7ed6ff3b51d0dcabae0 2 @@ -11020 +11020 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html a6049bd7055640a8c0118d8b8c6012f25b6c197fc8b82c091ec48973ac2d759c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 723f88e3716c78cf500483de42e68a047f4a456f6adf1a533f780c9a76a5323b 2 @@ -11023 +11023 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html a6254f9a845616956aad8c6dcb6bccd49b87c0859d250615b582cb568bd72416 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html a172da5cee67013145c3188b618e7d113841f9b26b61b1840362f8c83da2e5f5 2 @@ -11026 +11026 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html e830a80b987b1e529f7cc893843ba0c1a5c8065a9a2a260042ba47bf2783dfa9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 8f7b5fbc0555d37e05a585c34e3cbd9e24e0fa2b748e79d0ccc49a42961f4b91 2 @@ -11038 +11038 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 6ab7818b80794b17a10ace388c7f51276e3b13e4aca45d0a4f86dc5f1bba5b7c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html f5c33fe7728b055ca300e268a776b0be1d18f31474795f490deca08f364e06aa 2 @@ -11040 +11040 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 5034b9841e8e077fcb29b2ac6ac3ab1158c1600cc3aab908b33b1d8f4eac50d1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 
0466aff0b6e5960b03bc601bdec7d9c0f99f271a8c959186c0b09de02dc0b187 2 @@ -11045 +11045 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html a19531d421542b5a0a467ef3b6babd57c30893a91a4365322c7c240385cbd0b4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 5044d325a7dded6bd4c4c579372ecb6acdc88b9e94adea9d1cef42c7064558c3 2 @@ -11047 +11047 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 6f4e0b03d4469c1ab8a5fd3064619ad78293212944585231e4de70f615dfc7bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 52b95f6247ca6be8b4bbe4c7362694bdc83a15439a9c51269baac47ea1c8ebd0 2 @@ -11050 +11050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 537c79287493a6e35ba83f0ae162aae53c6bc1b54e985db752886b831919581d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html eb51e01a271041d0fe6c98bfff996f900f32285544091e513d223685d26bdc36 2 @@ -11053 +11053 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 75be071d26b98c9a498fbac307ad6c3087aaba49c046bdfec1d48239a3b42ba8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html b22638ba0264900f6ea135eafaba80e546d74d612600907bb1e94c14f71a473b 2 @@ -11055 +11055 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html e41637acefdd02b9ddab18c9c771b373cbcf897a55f794b023647587bc9868c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 7cc8a2b1276a37e589d8eb35bbf8c359f26021f0627193bd30774cdd4805de62 2 @@ -11058 +11058 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 6c82f7cd411c649c7cf1eaa16cf2429034ef1f212e905ced5435be173bb4bc53 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 435479a96efaf371d439873608f9480d64893afeab3f610712fd1027ee784f6d 2 @@ -11064 +11064 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 0cdad5b2cf9204a07fbe8ead6ff8e0a37d99915ac309dc8457973b7464a510ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 4c1116fd134b1c7b148551abc6d9940085c5361c5e54f50ba01afa5d0c595cb5 2 @@ -11067 +11067 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 8135c4f276aebb2643a971ee358f681aec3b5e280e4d8883d7227ff5716dc332 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html ab7f2f89737cf7330cecaa7efea0c7ba1028bf34aa83c24f3aebb475af983ce0 2 @@ -11070 +11070 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 5320b3453a9b8b885ac7414b78fe0c40103e680d9832660c66a1de3edc39cb5d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 56488d159eb45ce7a0971bd43c207be47db590d64c37ca0bb5ee9eb10254a3b7 2 @@ -11073 +11073 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2fa9257973dcfde833b2961bcdccdd1e9644434c8e0f5cedd850cb15df8a3e7e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 58438143d149ae7c0d67e18ad4bb241c6a6d8ce2414cb67def2da53be29c99c5 2 @@ -11076 +11076 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 30d1a9b2973e33dfb2aa87121e142964a27c64accd0398adf799ecf31ea6bcfd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 06286e060621b2ba254ce737fde210d4106cef415cd876f5acf2bf4bffcc992c 2 @@ -11079 +11079 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 1bda0b381ca1635a9fc893f83dbf205ee31825375912cfa7e518946bfa1fab82 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 3a97e3dbce6ec0ae61efc2def5ad2737fc967ad921c38f596f366466e40558ce 2 @@ -11081 +11081 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 
523e3a99ea754d485249c0ef6479b5cc76e6f298971a97baca699561315600cc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2d5985764952be4ae5faee62b78cd8fd58ee34ff9bb1a3b352dd0da80d8e4bc5 2 @@ -11084 +11084 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html af8647dc84088e32fdcc037521ddc51b8c52df188b9eb8cc324a3d503c61a66e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 34996e4eb77c721bf5587432a02ecf29040fde808459b1d334e5b6214e2965ad 2 @@ -11088 +11088 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 42711d7c606faec73a0413d3c2a0b4be987dcbe7b35c76a296be383650ae514f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 3cb45be67dc480e7e7df4e4c817ea614caa0c07da5cb83aad1693fedc61eeb65 2 @@ -11090 +11090 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html b9be9ff0bde5bd6094a341e3f80421e1afeab6d843f664cfd5347a1a744f67c9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html efa4780ba648c889f8544f9fad49c14351a258593b6d617d79172c79d289fb7f 2 @@ -11093 +11093 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html e3387df1539343816e017c4731e34a2d4877a510876771ceff0844ccedff2e70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html f1990cb43c91982720c4dc07c56d2a125212c2dd5c5d09473c3b183f0c4a62fa 2 @@ -11096 +11096 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 964eac969c958ad190250a5211218d722cc460a58dca8921b98ee92b2227e87b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html b008463e995de3f4e712dcfa870ca605df5b940e4f2a4d97e2b04de08e584212 2 @@ -11100 +11100 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 20dd4a6e47656f49e1f405d241b549839c9cb2a9e4d3b0438c8ee39c6f0020de 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 
86f8fb1cdeef346ed11966cb9f1a8006ee1d9a500fea48f32ecaba04005cadd3 2 @@ -11103 +11103 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 0af5ffb1c160c4ceecff497d8cd7b32b3c61aa1e8575a312b53e05fa6ccf554e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 3872578fd9b2638196dc7b81d76ef1e8a93727aff4b910b25366b87aa84daa05 2 @@ -11109 +11109 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html b270afc57eb467e40097d6bb3a69dcca525d3dba3c8b73394fc3a9891637f178 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 8c8422e3260264732bfc71b0ccf9fd9221a6d6f032265f5d11cde772b2b9a3a1 2 @@ -11115 +11115 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html c0fd369cefe35142dcc0ae0d0f46516ca3323633888c4a182b954873432270aa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html cfcb672786f687bed85278b9f13314fa36ebe7df1c52771b1afa3a1e33b1eb78 2 @@ -11117 +11117 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 484076057ac71f2fbe9440b0c5494bb21ef4b7c5a22fb2c2f268948e07e1d400 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html eca344795beaafcd79e3a98da0954a5a7774ca3fa1f4edccb15b8db2d87ca2ec 2 @@ -11121 +11121 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 270d423ee927c1f414e70214459d13a7bcdaba0b186fdb60bf6c942df2537b8f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 5bb402c8ad8c953771b637c43ef6d159ed4d4e4d6cb81968e0b1ba875c697edd 2 @@ -11123 +11123 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 507cd06b127a47a6a1a253d6a88c109f79bb3111ded9affd1944c539b64fe617 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html c9149956b9f009fd7f470555d4b68dca2c3a7194ef04d666622ec7c2cd195cae 2 @@ -11127 +11127 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html dca77d1b27ab56d28f4298726064a697ce02b5b653755972d3577e36a63a8f31 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 7201f94986fe448d446302b3f46a6a8d8755a80cf6d89e753847d1a20d752b90 2 @@ -11130 +11130 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 90b91803e70453c45b7eb58b8728034514307fd036bd730352acb73161888423 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 126b82187c4648363458e44376f976bb6892a536dd21275f463842de06bc0fd3 2 @@ -11132 +11132 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html fd0fa67251a7cfaa7a97e166ff61d1fa6ee56777ee862a1f8655597ec5eef5dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 5cccef6a87c50af230c230bf4753fba479bf0cf50508edd4e57556c29e49a5f7 2 @@ -11134 +11134 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html c4071a5c50ee0f7b59c0e015fe4353c1cdee590fc55557a44702301f1b0ac7d8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html bacef94d2150876e84981cd71388296813b7d51fb503335d8c1fc16ab2ef8710 2 @@ -11139 +11139 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 1093efd6d1129e3fab5fb215e8ed8efb53c4becf53e0293081756e4396f9dd71 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2f44e708caa9ee2679f3085af01adfb519fd551607b239581c021c04c6af08ee 2 @@ -11142 +11142 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 9c047c7e3bc69f64cc705505462dd2e7ddd45367b459195288f7f03a4482f109 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 08605f4b42004e9fccc29bd484eb07a70e9fce3c172a1af0ef1f267b5ffbe659 2 @@ -11145 +11145 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html e993343788fe8d90e17e2d023c7b57147e103ffbf284d5c97ca402853e68eb72 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 
4702b26f0251eef9db53851785eafe573b200e9f6413a2c0cbc16b062b95f65e 2 @@ -11147 +11147 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 8c7d49f85a9bc01b274e5c07769b9f6db3f735cacf2301db5ff019d0d3aec8d8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 50c8b343be03c70d4c354dc55567a2bea569fa5f84af6d2f78ed45d5af3ef8e8 2 @@ -11153 +11153 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 9cd34a66e903f9b42656ec459f06d9efe347db7e882107421c96b1f942b2eceb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 91ee68d16d23eebebc1e33950549d43be8a27e7e9f05c0a0aba2b177c20de839 2 @@ -11163 +11163 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 79d1e1daa86dd87a9ae2fa4ec6b4d19f3a57b706b0ead1e9e19e27022bedf33b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 9ae18e5fede2229dbebbca232b7e9b2fa376b0f9a1e7fe82dac157c2415f5b5f 2 @@ -11166 +11166 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 4f5862f1000a4f852d6df41dc551071348cdf41579a4a689dabcb2002c2e9b16 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html d5458d972b4d0953694d3daf1e6d7993130e4fa94b816d2eed800a4b27d9ec61 2 @@ -11169 +11169 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 7f2f95e6a759e9ef66342f5c29874da339f4f19b542ba721ee3d4b722bf7b008 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html db1c85bb2d2fc40ea900dd5e4cfc27eb1b9013da3576684a3d0a7d13b739c5b5 2 @@ -11172 +11172 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 5fe08b4de924835486af1cc1f7941658eee1bd360bda62e921ac346a42aa36d0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 50917ed40a6e477180b8bd73803fad550c796739bc3506d923b62bd04dfd5bca 2 @@ -11174 +11174 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 4383671c0365e1caf514ca22a2c8c8742abbf9ac910ad347728d7668ef888918 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 081a38a0b599f29e7e3f099b9456481593e32db9f937c971df1fdca64fbdf598 2 @@ -11177 +11177 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html d7b4a17bdc5caec9ca3a63aa2746a9a3c7a702a39a1bb45ce4fee31f1ec24225 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html b332eef8a147a8812672d53aa1ed62715466990d85e02438e35d3a8eab2231e6 2 @@ -11180 +11180 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html bc4e3826d371f250c42a0e55e115cedf8c08dae81e7f8c359e6102d5f4fe1bdf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html e394a409609a67720e318892b3cf8f82a2f1adb2bfc696c7259446672060695c 2 @@ -11183 +11183 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 8b4fdd875e31803f664079ba66193ff418931eb40a2ea7e228edcb7195313f11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html b8565e4910fcb32c678a1d325cd99fbeff6283a62eb73129205e47a35fc724dd 2 @@ -11187 +11187 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 81f222d8f08978acc0bf4e0a8bffb87b6778fe0d4b75f8c62857ca1e608537b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 67d803d424522f6097577a46deea04ecc9e6b5b05a1917ab6fceab8d663a9b11 2 @@ -11190 +11190 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 8b20d03a6e2e67b3aec2ef7f77530071ebeedf9d5c242711dac9ca2bd244f304 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html c443a1d210392202ae60942f80b15f17d1a418c0f9639e9f607d0da621fc2f88 2 @@ -11193 +11193 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html bffa4fc69cc84381e4dda93cf08f7e38441420fefaff236214dfeec0bd1328f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html f0f0c8ef8f0a8b66ec24202ab1934dfe4e803b98d8d177fa2114da54bd2dcd1d 2 @@ -11196 +11196 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 32cfb2e90ac6d8d8f325e6e50149bb6aedfbaafd48980f130854765b09c32201 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 33c5b4f693c0893264bd6b7cc6c768008e469f1816e4ec9ab7aded450a9bbafc 2 @@ -11198 +11198 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 7366e5a6ca5caa73dce73e5158689d8e9893750d43330ffa64b85b9d0457dcb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 7ebf634aefeb8b7359a8a447ab44c367d08cea3220a42166560dcbbc2156da5b 2 @@ -11202 +11202 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html ad7af6f792de4756c4a86062b1f26304baaafbcc3e924c213debf990fc7aa731 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 6bfb790e43b0bad06e061ba1c16a52e13eb4150c2cf0ff3854ac95e1b6feaa74 2 @@ -11205 +11205 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 6e3b0519575528bf2557b2c8d03a057710e352a7077f2b43d9fcd4c0b854cd5c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html f1d0a4bf4ff52a8d626315fa7a5f6733dfc1b0babc5faaf582b68e831f262dd8 2 @@ -11207 +11207 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2a10ba75f9c1bf774eb42d1c5a2918566d1a4003310a7c7e18efb142e2c15687 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 69a119e701bc4693e48bcf04113dd9f53d40cbbce8a74e850c484aa9b2eab39e 2 @@ -11211 +11211 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 22924b8cad21174754e08b04056b36c245baf826a24c48462fceb44bb0b46b31 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 70a22fc7fceaaf07c89887822db84d98d0461c156f9c8553684ced890ec17d46 2 @@ -11214 +11214 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 7b82abe0461a9935503d74de759475aca1a77d4fbbeda6e7b8cb0cae82f4931d 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html bf7d4903d367ff759740644d4cba9627a58589641c0e337ca83b5e8f0bc55383 2 @@ -11216 +11216 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 13471178ba0112e39f15a99a94439c2a63b4eb588da87683bebdec74b2ec849a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 4648f189ea268015c4ac6b1c91a34b108b86e073c36c45e198645f1346b2c320 2 @@ -11220 +11220 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html d5e6d975c532a03d716a8c5db8e38727051a7332af8fb96b982a729aa6f88b23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 5d87104d7eadff05eebc728a998cf23d1c2e7085c1474aa9388325f248b65c75 2 @@ -11223 +11223 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html db15dcc7696289627f59167c4547422eeb4b9ddba2abed8396fad8bd79545b9d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 091f659f8d9e589565444d122883bd98ea3cf7e8f892868832d0b30307603db4 2 @@ -11226 +11226 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html e5411214727397d6ce7594f4b6abc19cfbe0c64453fb0e4444a4d536506bbd1c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html a6bf4bb2deb4cc304aed3fe6954f46016d37e03bdbbdbb01536fc068608d8714 2 @@ -11238 +11238 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html c25ed6d78a14fffaa976b44d14cad9bcd4ec0a8ea24a55dadebb3a9f35519c48 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 088cbd2c7c21baa99eed37c53fc75309cdd9372542c816bdf41a72af29cba663 2 @@ -11240 +11240 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 0be8b2f59d17b36a9649f43ec1e778b14e316d41ad7ced80249aa11998e94775 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 766f92aef6c4b890ca8ebaf6701a736c278d726d6562ead3d4014b7794f07104 2 @@ -11247 +11247 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 4d32cc805dead72d0eb9b6c0a4ea87b3ce808fdd3bc3b4bb04db7173eceecc9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 8a9346c3cfb21fa7704ebec41f960aadb053fd271fc5a0d5544e6f3e36a51478 2 @@ -11249 +11249 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html d92c1492b34a7460c0d98ef8d9f76647795670b4fcb70fe0d06f89d57b18b4d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 87e6941dd1385b15476bfb142a212031004080d7c29a5cfda95917499fbaaa80 2 @@ -11252 +11252 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 1fb7d3f909a7b1df4b462be27bc71c7199381f9e749a16b84c0c737bee3d6e6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 4bfcca2872bb916cd2ca097e9e6ed4fd185d2fe28bf05b6e13c2e8670db5074d 2 @@ -11256 +11256 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html b0110039b7a9088cf59f8b091bc48a8ded2bc5e3eebb471cd0765bb74c9cd3dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html cd65e1b9f89c4bd9009975905488cf1e814f8fd3797aa98123b709d7560ee511 2 @@ -11258 +11258 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 0c0a3d79686d61e4598736a0bba8a56e21d17f8eea25e255a1052607f5397f3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 6f736b96880478d6a0cd681eee463335fdd4709000c4100eef9c67a0a9221f7c 2 @@ -11264 +11264 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 21605e482dead0d42ac53286e1fd4b1338fdd8bd901873f024476ac69845e434 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html bd5d114aeaa4da88303ea0401b24b0154840314a803b1ec7c000b344458f5bc4 2 @@ -11267 +11267 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html e1744b50e24b0fdc54cee8399d51bddc60be695255a1dd528293d4d1e2b744e9 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html b734c55fbb99a48ece2f91dcfe4af34a1c64ab2731c3e4c9af885e04c46cd6c2 2 @@ -11283 +11283 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html d1fa5cbdf15b380486b24754ae053ac1c1cbd28c9425099ea6386e2caafcce13 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 17e67d0a11ab98473aff0c42f05646bee97811f0e73ebc50259efced397df7ca 2 @@ -11337 +11337 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 8d2eaff9b92b9c52a240c64a44f5dad1961cfdcc36eac658d66ef317070478ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 58b3846875f8a97d6098aa30bd7094112be90138c619b77b148e9a6d25426332 2 @@ -11340 +11340 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html b2da9c20b106f9a8514414d4161adb7de86e004d4a78f9de96a12cf83f0bacba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html fe1d9a7275777e28df481fe5267d3b0e8d1ab77f47eae671219059705d10f550 2 @@ -11349 +11349 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 364627d2ff1f829857eed410171fa5a28b6bda7caba5cdc4bcde6904a113145d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html bd68cf7cd34a96e808926f78a44886e4ea72a47fc62623ddd76d841bd744c333 2 @@ -11358 +11358 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 43ce09e4e328f7b15b9ec4584e90ad64febdf0fbeabdb8d94fae7380b71cab3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html e88c44f066c9298d0385a51969d00c9938ca3b91eb67e21145825925a8ac3e7c 2 @@ -11364 +11364 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 
eec04d6fc11abbe9189fbb895a4f241d5aaa3e37c77bd1528b6888ef1ab4749b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html e777a28ba1f419205f43e25a8b51c07650e7c84a8858b09b50551e3d8ac20daf 2 @@ -11370 +11370 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 7cdaf3755977215c01a027da51f75a87c3d051e2af8d4b1c3773fa83d014c64a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 7724a4e9fab56c254162f2f596a602d98c1d410c3fca156ffb0cbe1bd30f9c1d 2 @@ -11376 +11376 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 18691bf34bb6c3a0c6ad6b6b4da2a7c4d42d9aaa0ba7555acebff937d2447ccd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html df9c85820d94c33cb0aef6a90426d5e23b8675c959e93808e3ce1e920b4cf1bc 2 @@ -11379 +11379 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html febcf399dcb4755cc6078c4da19c910a6cc47519722ac76bedf290337d55fbbb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 94d73831239936f7773d35541e46c5cfd859e54b1af00250dc6c05a8c1bf9a7d 2 @@ -11382 +11382 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html cd771928fd34d06f09d2e5644b2bfd3ecf19ecd727c28bf2ac14c031e310b9ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 1e8da3d7e8c96b040703925b734dbc684b8d831b103b3c221cff01757536ccad 2 @@ -11385 +11385 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 3811d12c3e0c5a115452fba029573bea67c94ee3c4e62b75af2d39321074e585 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 0b18b44fe39acf4f693dfb9e782ae180fd4ee3a62827a43e717fecb4eff51541 2 @@ -11388 +11388 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html a53f88cd02be4f261b8812bcbd8b4aa1f87fe6b7dd5d2e6a9b6f5c1dcb9fe872 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html a8d84d71f4131786ef9bed1280009264b24f4bd6da8fb53226a3ea6d56fe2d7d 2 @@ -11391 +11391 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 077257f6fa7a3abf248974b9bd05c5864c1ffe31ffed9da6094c9023bf24678a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html c108f29ceb9cf002d2661ec6946215403d6a0dd815e4203ef364a465f9fcab48 2 @@ -11409 +11409 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 91e31d2ec7f010cf78e1f2ca6dfbf5393c5ddd49ca205b4d242af46219d84bb5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 912fd414312618a68c4cf21bc238256666053053b58de45d91fea1346985dbac 2 @@ -11446 +11446 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 1b092d16abebcad3fb1cbadb10d7cb1bf50f75133ac5313761f330d691a96e42 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 13d3b32b75b7b79e4f35ebd9015f1078e974c8a5340628ef410cd15ff4362f9f 2 @@ -11484 +11484 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html b69799fd5e8fde32e803c06d56737f4be87e0cd52d7a5f207d60cd3fe088e911 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 0ef73caaae1895cb18f42cbd7b670d024bb05b0834aa5214728a7a942e2b6b62 2 @@ -11487 +11487 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 7132bdf0a4e71056b40a27cf40a15fa08e7e667a8c8033849ce160ee4c86c0c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 1e8a811091b3d9142bbd45697ea2882fff5697b16925da60b76ba2a10477466f 2 @@ -11489 +11489 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 
e393f6b2aedf4ce5553b1f9125bac785bf33a4097bc07dd4f1f64b8257462b27 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 4b3194a468a5b4e0986069afe11895a03c7981c64b114beac41fd8042e9d145d 2 @@ -11492 +11492 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 865368620af049c434bb1ad9f54fb633afa194e4c69ed4f5d5eb4a11db0eaa5a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 6eca1ce65da503fd1e71ab6c5c461e833bdb5806ac057937e09523200cd0b62d 2 @@ -11505 +11505 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 10234c5d48ecc62ff84da0ebe52eed509b12c83003a593e654d03b526f062022 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 4e9a88383d23b8727c60dab3447e9953359da3a49325e95efbdaa2980c4c86c9 2 @@ -11556 +11556 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 343a2b427cc164da23bb39c0926ef9ed5768abc83f5391b14da81989d0aaa30e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 56558bafdada3e640c4ad2d3eaa6aa99df7c86e899e93af43df11a178f49505b 2 @@ -11563 +11563 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 8f01bc3b76624c89c066323ea7a35a289fc22e33c25b73452672595c9d1d8b90 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 5bdf22b0f3206e81716182adfe81fa126714da790612f291f8dc06b9718caa50 2 @@ -11568 +11568 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html a3db45efee74e2e1cd1bb8652dcc3c390530f5b5164364d5249c3b83f73fdd5b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html 5094da541d08c3f7ff449fde20673cd558fa2be2cac1b66b4e8d67a9280ce48d 2 @@ -11573 +11573 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 8df9908a026b1e397e389974c781fbf128175cd0b92d17e48100f2c017b61a3e 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html ad7a24500b8604f30ca434c1f34d726b757009749e32342105c90fd48d7d2255 2 @@ -11578 +11578 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html c4b78a0b74cd78395ffc8c6063e4087d422bde986eecc2d414227ddc8f11496b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 1b524d10fabe13b5d321b5a75b75cb4e92505f407ad1b6d3584f97542ffcfd70 2 @@ -11581 +11581 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2cc94f35d7948de7b8a21d81a278e81a6a8a339ad01b8852b7bef0dc53b2467b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html d56ced81cbc888342bc0640b2f5bc1b1a66e88ff1e22cb39ed9cae00f392cf94 2 @@ -11599 +11599 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 16b99dbd6bb61aba30d4756cb45381b57a35938db03e1c413fb4f519c1f249b2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 6732ede551a98a17410f59dd440ff086c1915b3b180a865d1abb0faf9e97aae2 2 @@ -11618 +11618 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 9720296c068aa49b0850e13a4b0f3e330cbbe31195601339839a96f196ae3b9e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 96ff39c3f51ace91e50c9b5effd1bfadad039bd0972f56277f808aaa151d236a 2 @@ -11626 +11626 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 7e27d4ab5e99f89b257a8c299d531b71e98119424089d4700c30f6e5c0b20952 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 5f9baa3a3344ddfeca1242148e46f76e37ab2bb6f6a558aa28ef13f035ce6a1d 2 @@ -11632 
+11632 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 973dc13ec24564f2baf1a194595027a38ca82084ec87b96aaae66b990c7506f4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 95cf3288340b6039b3a567fd87ae194df2aa064bfe2be16bb4383e7ab4e43de8 2 @@ -11635 +11635 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 31760487a40e333e1dc953bd9e139cd7435ca616c86bfb44e38a85b39be4b277 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 4a244ace8ff209301fd04534b755b061e5edd446b5cbe48969236870870d8059 2 @@ -11641 +11641 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html b33156dba47a7cc2f21db207678b92c47bc37e3016de90075b418bc938da0bc3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html b4329777a41b5d915adaa720bcbe85365f4e380d4e4421e16bfd1c1a06dc9042 2 @@ -11749 +11749 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 1bdecf91ceac0b353f32d68c5c8f4d5dc3e325aaeb3b9506f9dfa81991ce6da2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 785f56782ca3fc900705f5172cecc96464cb0186b5442b7399d3b68e75e4c6ec 2 @@ -11752 +11752 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 0253dba6356d50dac2ca551abe2d1fa678d119f95d69213bc53a78185f7e0de4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 64953b38ef0555cba6c2bafd42ced9aef2bfec5102fb5f21c4aa2ff0921cd163 2 @@ -11754 +11754 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 5cb3ecdbdeba5fcb5d33e973ca97ffde68cf063e7bb91aadb9bad65f061ddb0f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 432bac09a8497c39df9446a3634841a7a4c7bd73b0bfda95d5c789347991baff 2 @@ -11757 +11757 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 
7fc131c8e2153414c740df688cf835bb19c087bc005166ab621f5a94fc575e1a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 3db5a1aa4b2093544bfe6cb8cf2846cef80b9d6c8e5abebdc0b33109c44ec6e5 2 @@ -11763 +11763 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 3283e0fe3a80ab6bed2d8e6276409ad42a5fd4c49f12dbfdec6b302a3f620a51 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 300e8c0d04cba913e8977eef9a4680026d2d89ba7fb61a49607c6f3b948c2f88 2 @@ -11765 +11765 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 0471955bd9b3988d8d903df2cec3f8979af567fa38222bc27b06e4dfff7bc192 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html f3c424137b2e2b8ac8fd3dbd65daf190d5ff56338a4f198d8f96dabbda0dcb43 2 @@ -11767 +11767 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 9b7d90b696f3ddedc89fd851047383e1d18433ce973223d8ae7378467b9fd3a3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 6c762d02918d80a25ed1ac321039e4c54d93ba77dfced4893af4a40ee85a9125 2 @@ -11771 +11771 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 05018c58a7e9f5ab6f36a8f4f712e9188192bf69651fa5e5cc7d4231ac58024f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 49fe2dbddd300b7c46753412ee3b604f35146e20472db660b5e236f2d3d48b3b 2 @@ -11775 +11775 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 6a1f521db0a42c4f3b04b8f6ebeea9a03ea7b909a904208baa9a8aa18ae74725 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 7abeaf4fc3c53f9f1ab242b12970aecb9aa8cce7a0febc3f31cea27f83c2dede 2 @@ -11777 +11777 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 09ef2b07ca560afe1e37ec9a2ee8bc62c9f3097aae757c5211de08933e368fc6 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 4875ba341f76fdc8c1fe6fae18e4a732f5d7d9d2d808ef66505ef0b95181e980 2 @@ -11782 +11782 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 5f91c4d8a4ea64621567ce25d7f50056279b098ce4c6325a7d1135f8dd66173e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 6112405cb1b3f8205915f2b1f5254302e75da40f4047d67272540276f44eb038 2 @@ -11784 +11784 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 1da195f3b408507cf819bd48445ee093a0c165047a29ae5fc3c8e202425469f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 044d4d49ce63f208b840be595a2aa55bef8c597b51d29bdd7914afe3d910f490 2 @@ -11786 +11786 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 4103ea297eb8b882d1792e398fa061b62591959714720c26243210d04d136083 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html ee8de88065e74df38459bc8d85f4810ae22a37a484c381e5b20577e962da0e08 2 @@ -11790 +11790 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 4d4b824a1f81295baa86b6c9b2657f942f00503331d35c92bcdc2acb6d70d0ff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 7bc6e023dba900ffc31fbca8379d936c7d85824f7e94a0b2ec1838abfcfec9d3 2 @@ -11793 +11793 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html d3ced655a78268a7d6aa5d0c21af851e973534a2d80b910866e6f80da95c9e6c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 4850f6122b24b9802b430c0c937a836559b59dee3e4473af471b08bf4c8d7dfd 2 @@ -11799 +11799 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html a8afbd5e813f19a88fc09cf33f50265b4ea8042b5f67192610d7cbe5b0d41e87 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html b7a83a2c03f7df8bf24db2e5c98675f49cd9ce73167cff5b05d80f389fcdaba0 2 @@ -11869 +11869 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 647fcc191d7e82eae8e57f0d9a49330ae840a739e04a2351ae197a0634f8cc40 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 07b8bd2f5ba66cf0c5090c14c2ed5de5974d5eb9b23d142aee58f657a614e866 2 @@ -11879 +11879 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html c3c95611c965f329a8390b62d2a5bb132d46e09bb2086b56f2ece2432baf568c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 6c14af3b9e21911b11ca13e940d83a97cf3b8ac16d16d19274d170789fcb9fda 2 @@ -11913 +11913 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html 42dd2dd871f3d9098dad12067154745079a94015f12e43468739f364aa00b8f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html 8429a0f645ee4bf178cc5b5ef57868ac845e59fd5a192501bac587cf021fb663 2 @@ -11924 +11924 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2437811b9ddf2996a6e380a12f3b94e8d1276f38fe06bd3a3e8aa8347877bfc4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html ca9e5e04fc39c363ebd17b4ef9b49f0618a37b7e363f9c05ae3436152893503c 2 @@ -11927 +11927 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html dc5e4302502eea308d5ad862b0f9a33fa75e341ca1697aecb3180ce06d7e1dc9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 3af838b9bc8beeee88f8a984a1bbed52e3000214115fab5d5fcc96ac50e9e4cf 2 @@ -11930 +11930 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 24d7b410c745a5d1225823eaa03eb938a1fb72f43b85f3b1ee74885701c600d2 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 0e782a941b360d19e8363f4933be1f638081a67953c4b6fda779ee3a22a0c476 2 @@ -11933 +11933 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html d7c4b3bb799bea90168ab0eb0a56e25f68f1c7871429c4718bf0c58ba9ca56c6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html a422fe2155264f7cef76826b617213d15c36199b72a463b51659160b17a7881c 2 @@ -11935 +11935 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 690caa64ecc514e677cc67bf3796ca66c649bb6f8e95e288f907b44ad006ef5a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html c22744a5fa612aad74286c0c99536400d37978a6950c1995f84cf54539e9a34f 2 @@ -11937 +11937 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 0ab253f4de15c51a372712d7d5263cf6d96f0226bc4cd08e0b57ad9c503b74bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html af0b904cea9fea80f13a8f7444a2d8871ddc4689d6a6270b7a522b3937046a3a 2 @@ -11939 +11939 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2db810ccae0b5f42b69f894cd45e0d16825aa301ef6ed1ad7a14c1b20498e9ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 3e3913b82f39ed5146b438dfbdc5a7a0444d1a69a16fff889631bccc839ff56d 2 @@ -11942 +11942 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 05f47cac4aadabbcfe91c570eb1c63d43ca89901d9f203a5f8e9d1d823cac10e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 026ed758c234c2e8f98455fb44cf35960a08f7cafb088201bccfa3c328d0e5c1 2 @@ -11951 +11951 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html c02103d4e65a1209367d199c4e343eb7157236defc33c4edc537ee2bd5bbf36d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 9caab3ca91bf1d437a32505abf7ffe85e937d53ff2992b88ee8ae4bc590de4a2 2 @@ -11959 +11959 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 5f2e0b9ccafe22d40c7ad2c3049b52f85fce5f0783411986877e89f991487150 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html fecc0e73b5f21598aeb1e97fc24b2df273d4e60e8d7c14c9067b6f82a1d47e24 2 @@ -11974 +11974 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 756d32e3ce5a5146e1e3f6459077a604a1369c40a8003bfbdf7edafe7ae4f1df 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 41c49be8dc97ce857225fce12af2b583289626109fd83da392ee94c6ccb9241c 2 @@ -11979 +11979 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html f30c4a4e92a0dddbba8ca439a023d87625659d42290078d130de44df4ab439a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html b09b637303ed51480d73d40e657ba42c4ee92830dec9e64ee637d380849e666e 2 @@ -11983 +11983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 5c3a4a3f5124b77035e71212737fdb1e2080a0624589a5a8c2e83a4810ebe4f2 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 4bc7858517f9a4fa4ef14f699830f0120543aa78d0a4131fa3993e3f9fbaf1bf 2 @@ -11985 +11985 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 75715ca494963b3895c5c937e62e804c20ae6b4e7a06fd175b21853bbcb349ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html c5c7f39eb31ab0a852f2650b223a69951e79f4b58855aa808131802b0b419a32 2 @@ -11987 +11987 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html f772dd97f715d54e7bbf0e966bbd2269d66058559b3f887fd8880916fa72d7b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 64e1adcfee1a4c855c67da28eec360578243facd8969683c1268e73cf5b3e6a1 2 @@ -11989 +11989 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 6227722954b46c6d506583ad4a2b377308db84867061881882e8e5f0711907c9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 549e089211e3b96656923c47ffeb8230d5e0fda0512318440232a2bb85662254 2 @@ -11991 +11991 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 7c89a7ab27865c00200d0856d5ea16710510e71e69afce3228ade9e7f234b9a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 9c24c9d7b54a38b6fc2f38a25697cd33da19adeec4e5a7abb82aebc54b50e8e5 2 @@ -11993 +11993 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 76314f3d43a7466492e7f7951cfbdb590d2deb07a42777a1d64553e43214ee68 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 13ad55f1c138e966938629d3ae48463fb6248e351e361634eb1eda97214c324c 2 @@ -11996 +11996 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 8f19aad605bd59bb5bc3b4ef3a51eef5c2d3cdeae4b36814542743ab1b7d3f10 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html d86ac474020cab189e8feb16ab3280ee1fb7e01ee75dd24a8e62af0e176e4217 2 @@ -11999 +11999 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 575d6c17b62371ac838c9cfed288b219a3fde2f274d50e91d6e226beabe995e1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html f68db8357461703abf7a2793a7912d9bb360bc54fba0633bc736bd74f97da5b6 2 @@ -12002 +12002 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html d407d86795e20ca0d9d1fd5b1deb4a0109b0ba78cb38b9c87200c99de05f2488 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html f5bfa2294f631c612744b5c86dc085d8911953bb8c2df44f777112199da17a10 2 @@ -12005 +12005 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 3b82642bd9e0432e2119ff2c748874d263a2f49ceacedd04638e7d31d30b80ff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 4b8763a9bf31896d1d413fedcd49ba3416e1645af2d60bf7380de078346ac589 2 @@ -12008 +12008 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html c854bc4af47920691d45ad0ed09c6e6a63b0d3ff917942310038652944da623b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html eea9d5efc534811b85ea60e252eb6fc3b2b090f16661bf335a5198d916dc6c39 2 @@ -12011 +12011 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 
76c4d09f950ad33b62d36f65133c12e9c49d8255f460ca28336ef2cd94c8fa79 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2938d542ab670b6ac0eacfbe45dc5fa9b35babdc164387d15478c04426904a68 2 @@ -12014 +12014 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html d33c24554939f44de51cb4200c4dc1ef0657d17948f7a661eb227fdaf9f5d50a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 9d66c13d2e153f24c90c3ca16131376a03153134a47b218c8687309614a918fb 2 @@ -12017 +12017 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 00f139d7dbc418f9849b475f4bcbef399b739a36a6a0aa548306e6eefb50c865 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html d148f023cf8b73b63065fad7e7bf049566108a8ed64495400012fbda17f34941 2 @@ -12020 +12020 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 0632fdf3e61b5491fc8c49a210eb724d150226afab822c9ca2f0a1be8d4b6213 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 30b28dbed599236a63f8491cbd826b61cb9828e6ca592d43004d0942e86f6b72 2 @@ -12023 +12023 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html d3fbd0a2ef768f6fe5b4b4c83f7d12462f5e8535cfcb1cb15905362ffd6c8f61 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html cc7df25ed5e4e7976036f021f873a9c99c72d42a90956ca889bec2e1494196b0 2 @@ -12026 +12026 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html f97407e6f9b568af81aa793dcb9d71e1c25bb8af4184af7def2d6fb8b8e85e16 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 0fec87aa0d6fcc1655e728178ed2471548ee4998991fb20bb20497bfc6935490 2 @@ -12029 +12029 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 09d768c2764f05d1b477db1a6f841932280c4f0647ba4293b99254b0bdbc91a4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 0256f2f859c6f4ea7ff96823ebb19a45cd61b502bea453741adef4a0c0087bf3 2 @@ -12032 +12032 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 89a746cc95a833bd2c983a3baada4fc279fd6da1ff8a5d805b009f96c94ed58f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 3d422c9731af401cff0bbc29e75f2f03324a7de7b276a229fec80bd55bd43538 2 @@ -12039 +12039 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 126264cd52c60b70b58ad0ec32301f9023ae40e20392b8aaaad6e0a1d29b40a8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 65f0a8754d55f43014f80a5046a6211551a9cdd148546b7ba88006384ffecb8c 2 @@ -12127 +12127 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 35a3e7502fff3bce8385115c7240821b4638590c95b3b91569786469945b0aee 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 53a8906df49c0ccd0fb5d2babc9eb5c36d77de0da954b19e4c13b19fb8f2fc7c 2 @@ -12130 +12130 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html d1f7fb0c425c0e584dc05ea3b4897c305440a3d1704812f9bc0c85fea75dadf8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html b3f051cce09e5ef6a8c60a39bb412015aa2f0a9a4fc4b27828e4de88b3c2f532 2 @@ -12134 +12134 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 19b7f7634aa518729f389b28dfc4c71fb99720776004d0ebba131de5ba773521 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 703b33d3c342a63f54ff6ea525a3bd722a5a65ebe05177282c4eb791f10c9dbe 2 @@ -12145 +12145 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html f47c5e74fb413388a43a8e187ce3fd6dd062d06171174e052b8f47ddbac2d97b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html 2bb26df589af9d708230f71e67c4382ea02730a27c913c473940ebed6375aa5a 2 @@ -12158 +12158 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html b3e76026cc4b3a947d918c39c1b2d3b3643848f645ef11c4612234cf15abdafa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html ba5902b5bc3ba3dfb4468010192c9a24e533eb9497adceac8bf295922d4efb57 2 @@ -12163 +12163 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html b774d2f2c1532983b028f214fc93da8fc44bd8f467dd02a0b7d98ab766e2d8cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 898287b2a4fb4545d6c1a19c13789b5507f456a0d87ad984965fab71d1929128 2 @@ -12166 +12166 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html 76334677afd7068174f71845651e83547471e14c2c0e8115e2296a2dcd42ef20 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html b29b1d3229c18949d45b0b0c00abdce5a75879f167d51cd7736614c741f6f2c1 2 @@ -12204 +12204 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 0506acc9c0fc3c83ec4a192bb80e94dfdd3cbc811795d2361dbfe6dccc2dc0ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html cfbe3679c0e40da5ee7b0a9236ca019e8cec2801a07e805922bda99dba08176c 2 @@ -12209 +12209 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html d21bccd75261a447096dc56bff1644e5217a81ecc8e51a367e25ff7486bb3c52 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 375546fb4db7ef62c807fc3c77cff176812963fa20e0237f4209c09348bc6f05 2 @@ -12214 +12214 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html df60c8ec448c6f062afb13ab98c7bbfb07e086d98da7f606b9d2859a5560c0fb 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 3b42abfed070616a702d39beae4d0ea5f08eb2635329080d8f334c9547cf05ea 2 @@ -12229 +12229 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 32cc1f234d6f89df77e46870f73874ad134479a00c84781a495f7ae31e4798b7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2959e55ccf45ee8ad36d45db73f3140832c4f2ab239f2039af7a416d9e82aa30 2 @@ -12232 +12232 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html f598e66815ba422ab4d6559e5b4f57fad8a29fe4b49bfe67aacc1d801a20eaf2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 2a6acc6db5216ad0af9fc2e8793f31e5a0bb69536c26302a6cafd2aa470c982e 2 @@ -12241 +12241 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 17eee86d1012c8a52748577714de89bff1d9a4edc1ee5c4982438288f9f10579 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html d9914009764afcd67031b4645e62e1fb8a61017e27663cc31c147df7338404fc 2 @@ -12250 +12250 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 8b64235e91e9593c3acd2ceaf43d44ccfe281df6b59a9a6e0250f24e066160b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html ad1fac6dc915b8324c90916058db7f09eb5b864698f6efac039fa57718e11e85 2 @@ -12253 +12253 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 9767e175cbab624128a2b89a418fe4cd35f25fc03fb99833bf8eaca8ffe0d82f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 39c3fbdc10f64ccd604456c75d4c1e9445e6b57f8dc2088c1afe6985c306a432 2 @@ -12256 +12256 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 0261bd8bd8187b38954179bb64ff45ce326fade2bbe3c0efd315695fcbf1c61c 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 71a9cc5444d0e83b0c1135646a277c411d2592d747dffd8a5d8747ae569561e5 2 @@ -12259 +12259 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 68f166e2c9d56c518ab33fa04ff964808116aeec4bf4395b8a760520f6de7af2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html c767f833b117bb9078d0519c01a41efa3dfa9d4c0a6256faf08bcbc9ad6c53e1 2 @@ -12262 +12262 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html ef48b2974c050111696de9498a9f441cdd6b689f9ecec62605b9dd2d101f4448 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html bb9af5d182f4fce68a7ed10fbc510a400fcce19f423d435b2546d44437e43309 2 @@ -12265 +12265 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 5eb2245d786bf907eb9793f7c40918c4aff1ec77c7872724e76095b8f0367e6c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 16db5ec339fec78190db8dcc0c8eba299cc6425f920955d0f55c55c3f22e055f 2 @@ -12268 +12268 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 42bd6d8b4989e648ac848606c29e5cc94bb416a7fd8e5e3b825f79f31a3c8c49 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html f6876d9ca094dfe040946ff131f204e22a1ff0915253ea3b801eeadaaa1f553d 2 @@ -12274 +12274 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html dc03e74cc846600c6ec9310c086fc80d8bef4df3b97f311cc511005b4133c01f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html fddac14e999a9b91eafbd23a448b891c89fd6dac74ebf5a6b04616dbab7d7497 2 @@ -12277 +12277 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 28390e04bfc3ccb3c27f11198206949d57d9e1f29a178ef8815deb1353db94c1 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 4ac5f2b07ffe485ac35301bc0dc96456b80fa9260fbb2efb9af9254db78336ab 2 @@ -12326 +12326 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 72e773caca2342253d82e18565c1ce15b219e7a511264a130607c29abd023f97 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 952eb23d866c4ea110e32e4e1f2e76e2ee0e93ddb7e216cc202fd2ba3ec41aa2 2 @@ -12355 +12355 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 420ea0304d95cd329fee65f1d5d5821126ce73b26db89846b6a458d3bba51066 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 247b4ca6df0e2f798002cebacaba97b86925d7aaa3a7de36d71302675727616a 2 @@ -12360 +12360 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 175db470efb66e6ee80bbdd9cd38a2950b1e32c8ad901b5048a80d6c7d5cce07 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 4a0150bb369ef73cca03fb4461b786fa180aea3fccc1fe77ee4d2a9186438971 2 @@ -12363 +12363 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html f36c44fa64e9ecadf688b2d2a3ef181b3299b75e79e424c7b5fa52a304680394 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html c70f1c58178998d88ddef92efd0656844f9c3c150491ce2f019b45297d4eed39 2 @@ -12365 +12365 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 436f19426e9c7ebaa61a02a24c602f41f1efc3835d370eeaab3e0fd75a3e0ab2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html e270feec11729bdb756ee0bb372187308b7685e9ebf9730c927b55db8fa2db88 2 @@ -12369 +12369 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 94314255e7704e0d2bf4d34e3902ce9d2b6816650ad6e8bfaadb58d6b5cb87f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 4004f76d607aeb07799af8d21125ea8ab28493c884a007714e99bdc5c821ec94 2 @@ -12371 +12371 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html f706a907dfd948217851631877197d37a7e64b6e4e7a12b1f2c6dcf55ce962e9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html db2690afb73b28327a23f9c6e95911a8e3242d5db8c045ac9ee4cb8cb7d2bfb5 2 @@ -12375 +12375 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html b396c896cfd5b423b456a297a7a3b8536ef2d39385b9715d7d9a101c2bfd2be7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 66554d01105ad93a27392c584251e05e9667352e97a2024e3523548af23d7018 2 @@ -12383 +12383 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html bdfe039f4482b20945528ff9b150ffecbf5d7a0f35f06431d4a6964e0fa01b01 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 7f276c02168c328141e421d22ff0a97747f1aa140c8bccda740ba576473e1447 2 @@ -12387 +12387 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html f278180f6928238b7908b2bd037ac17acd433b8912bf8fd76721d24f6a689871 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 261a37d0958da68cee420750e2d74b379be5d52ad856a70d1e9ed004e285545b 2 @@ -12410 +12410 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 9f1040304a94ac8f3f368d0e868fe15568adf2648ee398ab153c06c4ec02eb95 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 640a4ff21eef0b642fce6474f53709589c8d82c370f6f9c0efa582687940c253 2 @@ -12413 +12413 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html a861285a4f05f7e873ad6c888f0e71c4ddcb00b9c05bb4e29334092410d4792f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html a1981566667cf2487e8b4a66df85e00f539fa59555288a3c9ba8f95a7d50196a 2 @@ -12425 +12425 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 4b8b8fb8d484654b342c6a86c3b4cc9a602ff1db71239a4fd73c91dce00ba9db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 
8b8a88cd72b17cb3cbb082bd26e24a7a9d3370ebaff18d5f0ac209b7598bde9d 2 @@ -12431 +12431 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 59ab4c4d0e026d252e23d8a36c231c320652f52688974bd642c378bfa799954e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 7d09d47c7b35d44cdefe8c788d8c7f1e0e40749d6d1facefbe55a9ddcf6d1f94 2 @@ -12437 +12437 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 61c3986009b376338606e3c72c8253b87d4819910185ad725eff03aa49c5a87e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 1ceec65f3b23938cf5aa396b6281d8d91468b398b662b8de807463e79384c5d7 2 @@ -12440 +12440 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 28e54cfdccdf494035c4a575bb7625db61264560a98e70493434b122df87332f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 8fcdc912f3751cac294071de8f898c2eaffd05be8a6abf4b2ef67ce3f0078cb0 2 @@ -12456 +12456 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html db0210c320b9052b4c7509c3811e62b3db056647f6822ce64c3e04b377f40090 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 454b08ae6a0324c925ecc18db693c46ae3a0783d6d9f6b3bd3bcf147746731fa 2 @@ -12491 +12491 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 8863d122520bcdde707293f87b037fde530f57bf7877851fca721b80b61a9161 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 6a2462450b707b3c49956614e65bfe1d8f7e32044a74cdf3f6067cfccf378f1e 2 @@ -12494 +12494 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html f6ea08e6ebec0d7b89edeeb3febf7eb7472ead474ba5a7c98b51e10e226564e7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 5cb1fe2be42b315f90b601ac398d43789726de44a9fa8af6b3c2ac8c3ee802a1 2 @@ -12497 +12497 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 4f82df7588126e9f0753d92d2f8a402ff63d3deaff297b1dd838b96e8ff1a8d4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 0707e0feeaaa159d95d66e8db676d66da9801f1e0910cfbed379a1b6a7fbb4bf 2 @@ -12500 +12500 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html e090211010ab449eaff230d9006fd17f4207d0d0d45fa556cb17c9541fdd8afd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 4063336b61c2b1e70d77aac9d9c613ab0418ac5e3cdb593290ed37c783675f88 2 @@ -12503 +12503 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 6d7504ce3f06932e4256780765910a013721f81edbcbc5fefff96d06887e84b3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 873e78dde4e5a8aea876aacebff4ca5a2dcd9ead178aec27da6da18ae94e8341 2 @@ -12506 +12506 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html ee04ef3f3bf5207dd502ba51d1191959201560d3e932426e08e1cfb360d9789a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 3bcb2827cdde6ebd71fb373c732f561732a0144693ab297e99df0d8fb879141a 2 @@ -12509 +12509 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html a17c82f30438b2d63f44b02ab574e6a1029818eeb6f62c15a5375cf07fdd9813 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 3e16700aa130cd0ab2e7165a0aa6426999911138157c9a7c124f1752fa8425ad 2 @@ -12512 +12512 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 006bd4f0a1368e4206cabcb9473ef2c8cc2cadce6126157e7fb655bc6ff5af93 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 
38c42ad38f791ca49e8be0f5b8d5a3e36b7f27cce81ccdb7bea414ea62256704 2 @@ -12515 +12515 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2415c5cae9d8970112d8647df0ed2a2639cbd1093fc740337058c0d8a7f9ee02 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html c045881dbb9f6628138c7e67e4a1c0ecd1dbcf6f108a6074659da5a0344e6151 2 @@ -12535 +12535 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 1df54ccde21fcdc5c6dbc222e061d136f4ec1f52e940cb47a5db2f1764969af7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html d9457993f21a9dae839d9d9a4b587fe7a17caa426988420dd7ee8e0db684ae54 2 @@ -12539 +12539 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 79e5a6b94170e1c1128d53928e17cb9b009bf55c31f1b33ba8459ba6de485ae7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html c5874b19a82d1780f6da3b503218afbbd99393b995471807b38db52ae1fdb617 2 @@ -12541 +12541 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html e6cce09fbd68205fef9ddc882da1b134ee394684c2c5d5bae31d34fe431dc725 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 9fcdc4a0f42b04e9d29937bd0ba5dcfae37188bda2b0b1a677f4749475dfaf12 2 @@ -12543 +12543 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 9594aa0e73b1c4009641def5c2d3dcdd4580e29e0ce5a4301bd9dbc847262047 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 63c5e97ac9a1af217c51fcb6fa2e99fa0abf6aa66db2ff8a6185c9e7c5903b94 2 @@ -12545 +12545 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2d753b11cb85d8a5b6a6456b4175e9ef2a9b0b2c49690ebabebbca394094cd9a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html b5b14367c2977bf907cad131b43d5b7d2187d747f829c1ea883e53439cec2588 2 @@ 
-12556 +12556 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 65d83c91369cc32a273716eda67225d40f24c3aeac1bc2ece912d0b15b6004f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html d5e8867079346668c5542d020c7fd9b205621a8effa42fd5c0874365c34b765c 2 @@ -12559 +12559 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 5e406aae698b0260707ad1d7146b34f053d75b8ae1b21b1788c55fff0fa4ff96 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 67fdb523ca0a810b62d84109ac72a98b447f28f6ab9a78211670a90872adf81a 2 @@ -12577 +12577 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 9472ccee1b2c4d5f4460f872aac8ba907f4bb6f47a6de96b8d278cae395c03f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html d22d39db31aed83bc50cdfc4eb5844c8d1c4fd50a95c246cf1c5fee8b87fd587 2 @@ -12598 +12598 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 0e8d5d947687ed25494f0e6ad8f56732b73686d0a1c8c588f3ecf4c8f4c598a7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 1d0d5854e9c603307235362e539d4925ae1a0b62f45c89ea4a663c8a331c04af 2 @@ -12619 +12619 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 1d2fd8af6c974c4ab1823c4dc8234c1642d00aa33c1846878256a339a4097351 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html bce1fb0205f3e2e5b80a6ba30329ef2c62a588e84a79e749c8d58ef116650ff2 2 @@ -12631 +12631 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html eb8099a9f580336e6ddb62bdfe10fcf2e639fcb182eca4ff000ef4d37ca4c1f1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html e5fdaee4fc338172d27e75bc9ff688a82849678412ddf24918c1eb305a4ee070 2 @@ -12635 +12635 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 0c16abba03f1478e0fb0315a6e6fb6890e587862a8a4f70b71b51c25092f0f6f 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html f72137b741b69bf53e88c7198c1c70725c3c98a6542631f948a375d1af8d95c8 2 @@ -12638 +12638 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 466a83606b57d43a9d24bba8d396f49ac398d341c36ff6af0a99ca8f147d4406 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html ccea15bc3de90773ced306becde2e52d916b1766e01980a58db7802deafa2ce8 2 @@ -12643 +12643 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 300c4bff00c2b910b2b3aeb6b6520d5664b65ba589a2cb7c79885ee9f9d1899d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html c6d758f3b86204be0b807671e88efa3bf363b11daa127f968ec6e0c9e7f17322 2 @@ -12646 +12646 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html f4f1588d2510a3fcc90f896f04e186b28e3d607bb6a986912fc839e5da04f328 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 68f245ae3e8cad42eca81a2b520572696c49676b4044cb2009595bcdc8723097 2 @@ -12648 +12648 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 0cc05ba1c7ed3b2cd060677eceaae25cd6a6ba5235b2f85cc30069a4d386c241 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 9d9dcdbb722ce9cf0cb71e6f505c16e66d9fc9dd147d218025b6fbe7f51b000d 2 @@ -12666 +12666 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 4bf4890abc78390d853d9c42ab5d92ded98c00b1740126192a33577cc7513c0b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html e9e0575f15715646c896434a1ebe80983b5076d91f5d7b6609929d0ccb2083a6 2 @@ -12674 +12674 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 12002958639051f8135d4dc05b380c017d8123beff7620b75005783f1c993941 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 3c7eb05734e1e669376098816f74cd063e3795222911fc6e1aba4bacbb7b99d3 2 @@ -12682 +12682 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 78315a0bbc55f1ca1a3079e194861b4938bbfd4f43de98f6887d9185195203dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 303a8aa55b7f32678c0c365730ff0012a45b988d27b1684f0c9a5f3960159de7 2 @@ -12686 +12686 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html bf77a94508769e6efb0dd0444c41f5567d36968d274a6b3969bac62b8b102ad1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 04133098fe8ed44c7594758f374c23a9d8c3f4c24b9fc444a7457c5293773350 2 @@ -12692 +12692 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 6b52771fd93520bb66eb133cfccdd904c499c14a91c5cf757616f8bc923574cc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 09c294a9dbfe8cc4f46ad78b6d572f3075638e628bbc2dd2c00f7c5b33c63a83 2 @@ -12696 +12696 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 9a06f143eff3bcc330ad43e2c558f3961ff08d45f4510005adb5022f2162ebbf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 469dcdaae2035432f354f3cdb668d1d8006394208b4b811a69c6b1a22404ee2a 2 @@ -12700 +12700 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html aaf98289b0a3ce482fbfac72274d43bc5b333fc827b5cf00203066bd2a3a29d9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 86d71293fe5124b59be269b515e849ef3e1d551290c4a9f599df5745f8f65401 2 @@ -12703 +12703 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 21d9432ffc73e3b18da1ebdb4e713d3c2138ef5cd916c4ceb63cd178349e053a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 460b03da7013d4b870916e142b43017dc399fec1aea5c561adbd0b23cc077f09 2 @@ -12732 +12732 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 24d43a5195761eb569e4e868a5176f68d0c98cbcf0ede309d20b37c97a690e2c 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 6a1ab6aec007136cebc851cd26861a7c260f2a5cbc8e1a9f610836c5a71c85cc 2 @@ -12771 +12771 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 746c897441dff7ae9da24c565b57e44b44a9efc5e60f4384d2bcea00a3bb73b2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 02d7ab38adbc1515bc32046561ce532fd02ff6245aa98caaa3543718e52d7ad2 2 @@ -12774 +12774 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 8c5f092fec1504b196dc1276554d4f2233a8f4eb85e4f6b544e430724950c5d8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 6202048166a3aac9d64393066d4f0707334eed0e050564e93512f31af6e81f03 2 @@ -12843 +12843 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 7203a178844a8509fe0d1533d850b0a92a024909662d68c1a6cbecacf9f3b98c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 7dc74529914b5ac0df07f53ab86ad09f2ea7921219917be2428032873b074efb 2 @@ -12848 +12848 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 1e1e52f5149302b0b038229adea71d448243df084358314b5f6afba3037ef29d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 0b86a3a550f593cbbbd4f8b020cc6c314877d3bba58cd2a34ac303ea2c69fb01 2 @@ -12854 +12854 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 731874741a0c128af666cebb7403ae9a4cfafc85fe63a9f88da57d278f18135f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html cc04c19b643dd3d64983fd64ebfd60517050abbf61d05c0ca623f10701f56e0c 2 @@ -12859 +12859 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html e4428d818921abb24f60b3ced77ab0d6cf24ec035e69d44073384de86b11320a 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 773bc8220b353fd51f66d34ae73270f65ef39809907736cef4d5ee0c4ddf4f3f 2 @@ -12862 +12862 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html dae877f793a3428f1964ff83ee64e7c7498bb6f2ced406590a55d8cd206ad34f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 559fb54a0d362a6378994191e6c0cc996a015bd09cac68dfa49a15f842919081 2 @@ -12874 +12874 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 4419f2d0df322d8595df9cc13bafd8dd31adecce412b7e42a67ae330f4320b43 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 190e4b7922273a469f38f50897d64b99c18f12b16ce711b020942a33027cb266 2 @@ -12879 +12879 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html b3756ef93293fa6213a837b52f12a3fdcc24ac1d91b884106b01e80dfd019a2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 43bce5fcefc716c28fc64800f2faeba87ffd797b564d32432761796ebb9cadcc 2 @@ -12885 +12885 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html cbbab28c45e4033586acb1574f3c29074326a85ae13707fbe3d61827efbadd78 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 59cafaceed3fb2f741489b0e28d0f855e44749640c20a84851d0dc40a0844cd1 2 @@ -12888 +12888 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 4bdc3fdda11da63425980fe1f4a02babcf436244875367e3f6c15e7a5568582e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 76e7cefa17260829163cec58ecb69f4680b995b2195e4ca855c77b3b11706d03 2 @@ -12891 +12891 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 
d5da4d45907b5045c5e829a1cdb0fca215f505cc630da3fea7c3b2d6f6498cec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html d682609cbddb24408edb25fda8ebabd2a02c514843594c2b5cf5dfbcccaea57b 2 @@ -12960 +12960 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html de70d0bd05ee96f60c0f9fae013e745b940bfc12bc8e3847b1e3f46c0030fa51 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 74bc9f67230afb62e92d6e7f5856770234d5cedc012b870534eb94a3f64aace2 2 @@ -12979 +12979 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 184a2d94705dc8cc76dc514234fe17169a0c5283c7a1b6caa9b81feb60430824 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 4be7ce5015d8d9af99d861f35f49a3cf50cc91935e1669a292ab046314dec892 2 @@ -13028 +13028 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 69ca0d023a4fc6872faacca217c482582c3f93d741df2c9fc60e368c982e0ba9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 62ea80a0668085494a33f3eb8777d45ac5da694543a2428416af54066ff2219c 2 @@ -13031 +13031 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 3c3745c5e6097a77467be85b061d002f14dd1f056125b9a6566e5e41b0cb2ea6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 3eb2de76b0138ad2ec69b6ca208b3a6fc0c6f84d535efa45cb60e54711c692a8 2 @@ -13033 +13033 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html cf99d3ab5ab042d7ffa137f6587471ccdc04039cba89d32e2d464275f1f2818e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html 9cb647afd79d16343dc5a2fc486178db05c94a660be3d67b063dd3e435a9143d 2 @@ -13045 +13045 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html f8fe0def965375d073f22b353c58332577bbc843eb744b06ebf917209ed550c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 4166dedfacc00978dfc5a077c242e41e1ed41acde2a6770f963cbc2732abffef 2 @@ -13047 +13047 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 04ec642b4193d0752dc30ccf2a5b24d1ff8b4bf97747589954a08dcba41bcb35 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 818e86721fe88e7fcd364fc3241ce6d31ec4c63ba63c2e0282cf0dc9e748e60e 2 @@ -13109 +13109 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html b78f81ef6afad241394c92430906fd43d9102691d2aa6495a998bb02cb2c9856 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 8073c632ef959536b2749761ea8c33d17978771b834675ae80d188dbbeab0cef 2 @@ -13236 +13236 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html fc142d18281014904643a4ea73b8ee176d791a1406559ac1379a81423fb073aa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 3d2280e9e4342879acb3758af1ec61b7f6a9ea6fe3590c0e97843d2dc96e24a3 2 @@ -13316 +13316 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html fd55305c3ebc32562276d3b117fef6ba4c9b983851b08446c55c15e36e5216bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 8e294ffba2ccbedbe73afd6eb86d3b8daabec4e40333283ab71353c61373df59 2 @@ -13319 +13319 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 9eb2eb01fc3e08c89235d3e03d9c7756e3925028149347c56c37c09e85c466b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 
96229fff72327667d75ee61413aff8cef1507194274188906a672c04d05c872f 2 @@ -13332 +13332 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 32ed70826a89fe233bd45a626e5999bcd24fa6d62cfd3bb56d52e4b2332324e9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 8188f9b51246b961bb299eb80d29571b609d6e946c8d2319420b0a4098a59635 2 @@ -13334 +13334 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 3184d08e49fe2e299d3620ead87b9682be8e4975d3b6530be3c75fa43ff6200c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 072a9e71fee05716dc68766e1668a7cdfe73953898d59ec043c3a9a05faf4632 2 @@ -13340 +13340 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html a3bf34102120b629fe817e27216c1d7d88aa0eade96d36c4bc7bc34f5c0a2eb2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 66d829c233b25f2d3c4c1f589be7b8049b4d09ad278cf0acc51c0b7122044072 2 @@ -13345 +13345 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html c3c50e542f2da1edd46bb4b6cb2db24b343c5c25c5d85c9a903b89b0068d06c4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html aa20f2401c68111726e530f396719a957bccdfdf742db2100403bc25fa316349 2 @@ -13539 +13539 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 70d8e2487fdfafa992f6b40d39e53ca0d6ad705475a2906ce57aebeabe9d91a5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html c3e5440a93707e4ca8947dd516dfc211cb93e7919971e0a4c8f73e81850ffa78 2 @@ -13544 +13544 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html f27f73dcd0eb1c4ddc290a4a529d3109b0c7905fd6ca55c9b4affe520da6f22d 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 6eb4c3da6c1318323a6bee28ef834bcba47e15160ecf85953c96c12c180238bc 2 @@ -14107 +14107 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html 82e5ac57bd7667f4bb79159e5f16a125584d3161448cb7aad2aa97fa5aa879b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html a5fb472560f5bcee013cfebfa56819426f03ad1622256fa6033134e2a4dfdb37 2 @@ -14228 +14228 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html f326cb67bf116bed60be2adba8c279aaaaecb2b82c87044a7a8179ba6b4fc817 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 9cb4dec1f3cf6257fb28d079aef974eecbd08cb63c9560331bc3025ffb867038 2 @@ -14705 +14705 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 6506c14e1137e71afad256f30c7a1b3d3dcd7a968b62bd00417863d135fd3084 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 9fbb882bf6659517afc6ad4ade8de55aa0479829a257d31a6629cc84ceb0eac5 2 @@ -14708,2 +14708,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 79f2192f2ff1f964e861c6a0acb0717302415825ef88298b3692a10c09c097d7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html b426f34609b08a313aad86f409521e3dcb6b177043808ab866047e8c0e3274a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 3a48bcc96cedf15795cb5f144182ba3c3534572a646a4432d0b41bf686266088 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html 56c3f074d9731a34f88e1f25146b3da661b3357a5a9eb771a73eb7097cd5b917 2 @@ -14721 +14721 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2b406a5c78ffabc35c9e3053dedeacbdf3bd28563b86c5442e5449402ca38742 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 3e7cbfb1fd075e6bebe6daeb0b70b1f56618420cc99c281a00ae0268e6adac69 2 @@ -14751 +14751 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 757c4fd5031ebfa61226a8c00bb2c3fc90b7bbd3a7fe57ce44f76818e4bb2838 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html f654a19a856163a520659f359d0ffc5f659f78b18cb37576667a57778ea22bf1 2 @@ -14756 +14756 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html fd5e3766dffa1e268ed547488f7672d310503f92aefdb6ec7e560bb65e3249d1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html bb455b84b1d24c72b3b2b346e5f2e3d5bf17722e50db69e21129814a12100c45 2 @@ -14758 +14758 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 0b67bac979ffee507e4ca3e788e99c7a3027180c851d908ffa5f58ad5b208907 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html fb954909b7e1859f7b996d020ef8afea09597e14d427c68c41f8fa2d7f5e538f 2 @@ -14782 +14782 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 0a7706a75d803778fe97c4c3685ad000c595bb7aff9f6d68f00944c1310d3590 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html e40f35ad7ec55330f4e478ee7250a09de11f92607eb92dccf203d2f0a447893f 2 @@ -14788 +14788 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 3dd642a1c85cb4c8a9419a5e59df4e37e57784ac1f005b1fe5ff948f92102f14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 4a1a23119653ccf21b5e5bad5cd193829569107a531bfd38bc78ef286aeaa92b 2 @@ -14799 +14799 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2d51997ff9a23b7fd0c5bd3acad43d2ea04a080eab48b68fac51cc4a24855319 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 63960f317c6c70ac6a528b1d7ff43b40c814c49ae06be10f2850c05f8be73671 2 @@ -14804 +14804 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html bb5853f1edd1eb7fbb7f54e787dd49db6851b18da7df16a3334343f9fa103ed5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 
40b94170bea4fb451658c93f7a0a2d0b8f1ccb8abf45156939954949a7771568 2 @@ -14808 +14808 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 6a53ad63982309f9652302548e47a52b9fc1bd8bf83a622ea0232d831c5f19ef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html e154c2ebde6cf7c36782e6417713bd0fad2eb557f7c95c60dc75b9096d55dd6d 2 @@ -14901 +14901 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index.html c7942b5ae28d6bc4554191155c0a011f3eb150bbad4647659f707e233d0e82d6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index.html 63da94bfc7de886566b8ae761da2dca4c00fa516ab8513017a5bce6ca34d15bc 2 @@ -14904 +14904 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 6ff40533c4c5f11d3b688c265a15e40777f4e19d8f067c4a12e3330d5511e78e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 3d31bde9121ae94f3b917047dede84baeb0b05b255f844df93a375583770d128 2 @@ -15245,2 +15245,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 5e108ca4c9eb56420f33dda7481588d568eb55c096461fd6684d09967a550306 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html fdb9dadce3dd1d93ecfd42a8ae15eead26b4e81258d13f4b624dbc5ca7c26e82 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 979da966c1ff352f13092ce2a0419fa2342460eecb02c826c3a07bddd1e9b3fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html ddb495a9adbba149d9b4a9f635e9ca6047bf001c0d6331407c3e96cd5caf4331 2 @@ -15261,2 +15261,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html dc8377a06e4a395e9c76c64fad8da9d9523b3bb3cf36d958e065d34d663d44d4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 89180d2af5439e999436a23b3a37fb470fd5b0a1cf716b23d8f64862ee3d5d9e 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 96e4297e7f6bd6bfae3924ba90aa59d4e95b633fd7c9fc43a2f466b6dec58e2a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html e077b6e46c2941b8ea1f0fbf9eac9b178b46a445be995ef818902eba9035c88b 2 @@ -15266 +15266 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html ffbc33c1b7f3287201e9ac24a28f316f798955fe5ba330f428f9c0a11e067983 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html c867fdb8aba955852f441552eccd9c0a2b8448fb7a6da98a18f4f224ef2cdea1 2 @@ -15272 +15272 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 3d8b4f5f57318eb030913be1c4633c91a7b6caeb03324cb40a4f069cec02a035 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 866b53997e45fb63f354a8cad942f350d00ff3721ce44a8f313e5202b912b215 2 @@ -15277 +15277 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html c4acee15ed5c898e13224df956ce7f55388b266a7e3abf9ed26551bc0d8e1c06 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html c51059a1390cf156b6c70f0e07765c8dbce745f3e0c05e9d0c6ff7f4fb281380 2 @@ -15282 +15282 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 03ddc01369405df4fdb33bbe52010d978232286c87174bcbb9b822df22f90813 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 11796d22367feba1f9c8b55e818a9e1c2cb45854293d412732d2183d175cc7b6 2 @@ -15290,2 +15290,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 00a344b6030bc34528118cae2263982a6bc29a81324d3c461263d8e3805db0da 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 9acf873a080bb0ac8c5bb1dc048fd915a4c0f505c50d45f5d39021d7280e5e1f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 85834f5067c9460e51d2402cf9787bd185941ac1435ea2d2edc3a6a8e8fbc78e 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 58de4dbeb4810302449264eb0c765e73678dddf8b46e27c5c6b4b0bc5f1457b4 2 @@ -15299,2 +15299,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 826c532cc2ae8e66d82e327e5cf088702ab929a85648537aa53bf4c0fdb1788d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html c9a0cd981c26029d934d43cf0d750e786b530a1d23afc916b9383921dacaeeef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html bfde139e2770278f05cc53bf17c8081ee668aa1caf47f97527af03ccd116c124 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 8c4f93f934e9825faabdb88046c448b6524618b27629c5d2ff5102b1dc57a704 2 @@ -15309 +15309 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html e2a6e878ad1937f3ef2110533c4353ef2bdc134966138e7536f67438ea9bf0b4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 07ffc4d98d94e0469078a4c55f5623d7035aee600efc96ed5726d9d3583eef05 2 @@ -15312,2 +15312,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html b0959ba72714e1ded6ace51f80e05fc20c548fe8b73f4d12110b56c97f79157f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 1121d6959b776ce576176ae16827151c84ee0c1ae81f5bc00e6033899229fe52 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2496f573bfd2fe6b29c9872b14024bd25155e86367540773a4cb2544daa3950a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 206ca598c86309ea4dc6fd62de17c1b725e4833b5e7d0660eae20a13cb7af8d7 2 @@ -15324 +15324 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 1ea5cb882736d1b7a17d51156df7128b5322735f1e7b5dc3f0258185ec8a42e0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 5af24429308cde2a07603487621f8176a6508f18f76a56060f5cc3ce16e9274f 2 @@ -15340,8 +15340,8 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 3857281b79970af799ff9acde70fc72647494b47f94d6084306de3da5fafbab2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html fca9502cae4b5821ef24a80c6f60ef2658f7adac1d98c1ae73f52d7cbbffeeab 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html bbf0cd8ca8a97ea30f96cf07cddba11c196a87c60d52b4888b35e90f684301d1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 7657f65ea5252b75dabd2541894426d55e1b24995ee453409de60780796857ba 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 298f066531554fe2f15d394bf895d529f896f0f61e1d245f2d3fb008247ad950 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 30302e3324a7906ccd3448a339abe9588bf6b715886153935e67e6fc3518a571 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 41fc7df89e02b5905b152b83713d150bddf97a869dd89b9a0ec3f651a63278be 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 694cb1c3eec794c4c7c8c763da4eb96c5327bfaffa2244739131e9c7acfd10fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 5ad804780f9cea112998067c6b29ae31273cff0b34474cd47f975e0d634f7465 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html bff096a037583ea6fc8641527a5c041126b1ecbbd93bc882601b3ea158284e3e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 46123144061f647519e1a34c692764e4f8f501c9005fe67599205abdad6fe92d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 16f3d5c6f106f51756a8859a1d98d1a164cfc1bb919989368558c261bf61cb6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 
975a86639fbbc5afa7f58f6be3880fb51c348003061ad069a54affdd2973ea7a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html fb973ef7dcd37569e9cdb406bbb301488d84f9d65e53c56e00fe28b61106dd57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html da7c022df927804f6c695148741e06d7e62948d62e7aab7cd1e4d1deb9bcd6b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 1570175ba3066614469dbd5171cb5e7db661c898465506b9fb20e96733e6cf01 2 @@ -15368 +15368 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 734f9c7f0b31632d4ee8824413d978f2e59865cac999f51bc6c9880fa178d662 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 3c78d35e3141fe7476c7f424cac72b3c0c29e00e10e9ebbd889ea4323e4f79fd 2 @@ -15372,2 +15372,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 3ed707e1d2ef38449a90102a1139b417dd3f13e3e134b98679136998132a003f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html a95d4dd68ea658fe5361f545c3e0b8c3ad9061aa30f7ff543c7b2c318929e717 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 22cb98c9ee41904c9dd6a44feab0efff919bacb315eec9ed0f86a276b2f10e30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 29bdd934b296d81c76ee37f06729f7b839a9d2f68cb496f0bf5b23358fbeb60d 2 @@ -15381 +15381 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html cfdaa9bc3a28d2d605e9417f3e5e4d355ba5454a3668ad5d851305eab6cad4f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html d9bd35624bef53c023d9de9c05885dd1587b2e3e2c64e65caa085ffd28d1f802 2 @@ -15391,5 +15391,5 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 
0dc5982cc9b29a4f9a786724f695d6bbf83737be751df7231628842e171693fa 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html bcf7002a2310429564acee4493a35ddd7bb8b673227487d2062291f1047cdae1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 49151f699207af7fdbffb99e01f2dad82a7f7cebc80a4c4a3a2f48fa4ae10d65 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 40dbdf662b114654cea2e79be6f9fae79285fd506907c53f5e069b9ad018e114 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html cb0fa894a3125ec3961a845a8a66a0bfb3e3ed9ed7490c272a8e794e9efe241c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 871649bf6966bf88f26ff968b88cc31e0cce1c6e73c999263fabcb996a21b82f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 25af7140a9df897170ff635d200705e267060dd5a491e12808ed23ed79e9da9a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html fd44c9530912cab87565c6f30a521ab7a2e39794e462f7d1b148c4e65814c8b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 4541d763e98a89e489defb36343dd81d29395d31a061e78d5e9d6f304c9d11b0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 02fcc82831db5b4b3e72f4d2d752f2cb57c5e21a557af6119369693dddb00751 2 @@ -15397 +15397 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html bc159c6ef18783c1e2fd19302385e5bede074fbd05c735ea64e6016538355bc1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 89b2a7d6689c72d7c6474c735738eafd51caf975d95310e88c9a4f29441a080c 2 @@ -15407,2 +15407,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 
e14e7634cf87b57f31959c2c34e2c6ef2d3b1145e880a681433c237f9fd853ec 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html c5bf99724abdbf9c5ea43717a635103713d098396ee9e0682208926d6c354f33 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html e08270ac882e2aa69b7fbb51f93beb89269143ae46ba8f841c0882a147ca5f55 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 643ae7c1747f30ffe655baafda06b2a51dcb58b7a8559770cf72002b6805a6ce 2 @@ -15412,2 +15412,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 16c97a03b915a2b5707522d62b1838a22edbaefa656186c17cf795586f931aeb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 22ab1ee27f754629af51dc8bd7474271159d45399a63bf87f8855086b81a850e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 99fb1ae7403a54470cffb0daa95af21ff773a3f238d432b13e1389ef104612a9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html a2ee3672de3ae92415b01e7a0c180d4b6cbdf4ca8b13ecaa4df95296498a3049 2 @@ -15415 +15415 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 016559f889e1519f4b666fb9326dd3643510e9eee260e5cc7501923ff1322546 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 5c0f12dccbbfdbc47629278b20b13a268587265dcb786bb9afe0c5edf255a0a0 2 @@ -15420 +15420 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 4864c3c51ddabf6fdbd97e3cf769a2558b9f0cc5c6e27aed12d50e8bdab134de 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html da33b2b8f5130d6d0edd38653e0d0cbe962065696343f1dcff973469cf617d6d 2 @@ -15441 +15441 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 54f9b84c4e7eefc355e29b4c8fe5c286041b6cfeb1780c7b730eab08846b2246 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 03a7da85b9a1444509b357a9dc5f1e36264c056b47e69e7677ae798264be22da 2 @@ -15443,3 +15443,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 35704d3b6429cb7febb9cc8cfe04bef0446cd9556740ce2bef9b44993cc8bfbb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html a17f7909fd7fe7a63b8a28e021c3cf46ce94a74b580e3157d0eb32954979ca59 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 3881438c0493d3fddc7814d9eb5feb05b44abb59cdd4fb185e18d81bf7d6edfb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 287e263a993672c5eaa2eb27dd872d049cabba7a79fefccd58e73cc6ed6b4fb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 5072eb7855e09bc9c6740bb13e2d4fb8c23992838ff43bea144041053002a0aa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html c9ef8521f54f2f6628ec135c87be05b8f25cc8bfe676cb84fd52d03ee12df42e 2 @@ -15458 +15458 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 8baa6c54ee4ac18d197c6a687d9e8e00c7309db7c418506939f4824734ec703a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html c3f08e793935900725ced2c2d439c3c65ec9520c800c7e65452d2654f7dbc902 2 @@ -15478,2 +15478,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html c7ddfebd892ab4dafcce208b0d52ccb507111d85e64ca2ed2b1f62a902bb1a20 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 536d68b00fa29c1fbd58a7b9a86b78dfdb6a162345e4042de6fdadd6d88fb234 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 902e8722f8557948ffa29107b803ba4f1625bac45a736b64959dcecce4bd6ac8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html d7305ab4b373c05874eb2a2dd6f5cd9632458674c2dedb7fc1f0a365923eebec 2 
@@ -15616 +15616 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 882a2b8cadb5ea2d030e5c481d44cdbc49bbd382de17580c8fff1325d312e617 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html ab3ace4e6d3e8bf1e65b09ebc236f69728a789c5ba30faa99a0303489bc71922 2 @@ -16711,4 +16711,4 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html c175480d06b83a3a4c53601b18464737d84e7ce1cf64bb7cc860c4841cb56d0d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 1c5078050e1526e883fd994c43141041b8d76650b9ccf2e9ff2efbb42aeb361d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2ad410689e062f0c1d8190df69d3bb5942baf0097000379d5c943cade46a3f36 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html d405e4a0d42ccad0c1cf3473314c9ec501492270bec6a7fceadc6908c7c15f4a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html fd0bd81f0d31e66ba5c5e83ac64d8eef883706c46ad306fb6cd41c95f7eea4b7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html d5b664caa135436f21901c25ae25c77f3791991151c774e89c3897bfa551693e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html c70bc56ae960863badfe7e26ae077ad1bd915f9b2841e3b2a5f98210f4a67834 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html e1356c15040cfb93938701a6cd113ca6cd6b58763820643b0b58d5e688fa0e97 2 @@ -16716,3 +16716,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 38a88654511f8f66c398e5e51ed61047039e2a4397de13ec42bfa79331eca078 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 56367ac968bdcef04ab3e07784d73f09e607ce752d31cff1599b82fb1fdb4335 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html e20590eda9cf5ad70c28f238a794cebe69273dbaa07038f3294ddcac6806b5cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html c379dee1734b8412b1889460f8764d33c2e13d390ab65cbf9e9dddacf9002b84 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 
d75a9d3e5110c838024a8111f981a97a00fa365e8ff7bb7a51809ab3564fc56c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 388c2a1f8a6c6a9867c6fdbbbe5bb9138b1a0498687bfe73471f9b75f79bea63 2 @@ -16720,40 +16720,40 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html a22dc8d7faf4637b30ab200cdf8a41e58bb90b7b030308910f505145421ae61a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html d20b625e346f95d73581effb0ebeddec5f70242ebdc50b9b8543fedee6959fca 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 1a71c276fd74154a089a8a2081dc4c3279d7539c7170460f39f7ce435c59d8b0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 0d1061d202f549333c3fa9875c4047e2352e57614afa175f820c0abe5ee929b3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html d61c189878ed3e931eda2f25286922da665f52dae09da61bfe7e4852556dfe27 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html b3622bb830c2082bdb0b01b18a38f4963963f4d1743fde0e98151309c64aea7d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html 31ebd093acced81e9e98848e56df632003c871185c828121764cfd13455004cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html d00f342cf56e74b5c9d7b7685f90704bd8d9372ad4899a5d0baf1b6a1e786c5a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 3a5675b977553940962f9fa303a4cdf10495400ce6b0c76de3df97620d4bd653 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html e499d09b8cebcb0428d54c22f8b0f53feac193b4e8c5f66d2202fa812b4eccde 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 1cd9d499bebac748cd9f502117d84ed885e0e403b30d4d7a2673deaa588e623f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html fb41c618e0c61a754d7f1234ce9e67323ddc315362a5880a51b82ced7eba6552 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 3121430d8f52633951287686776aef267049143dd7d8cbf7c51816eb49494841 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 
0e2b5ac22e28b1709d36845483b57ba4f3fd8e2879d3e7d10d7ec49f30ccc8e0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 79b1adecafcc23ba2a7ee9441b1ff830076998bebf9051fec049172ed8661dd3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 9dd149ada5c8f073c425156972fea7a2de52bbc7cde7f63484868a38a340a36e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 35f918354eb4bc0d0089a9b11290cbde599068a59d4dbf7ae8f876133d470c46 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 8a3562ba3a08460619c14aae359218c8bfdc6b41aac20c1deb3670c2ad226f38 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 8348a26e0ef15ac2f2ec51b2a24116b1f91f57f18fa8a2f610dbaaecac5c8464 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 6833a296fd55cd9bfbc60dcb0c4ac9c6ff2e8a334393f82ff82be970491d3ed6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html d11f7a3862fb7516f2d9a917cc229f8a9e6e5cdd9168c3f56932308e2f8b8102 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html b25a3498bbe48075fea5dfc59c74df9cd652dd73f56aed609e47b01319465127 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html 37776d92ef65494648329115b6c2efb62f957f5b7e68d68121fa0edc08e2219b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 3a48bd9f263f9b60da806f836db247307d58e7787fb17afeff2bd636ba356422 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 55a23e74c1638815a33f89e17f10b36fd6e425169bfdcb4ae990bfa858991e6d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html d112a91ba2dbc6f6a4dd21dfabb35694e0a9cd7ec4cf28c66559410eb780af39 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 8497b56733938f32566983eb7d78d8f564541168b65721aff5b171d0cea07b0a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html eeef84361a02efecc3d3db89404f170098125adfac0ca64d7eba794fa03bf916 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 1014660e1cfafa6b73eb47628eb50b92252cf3833e76d4759346f7f835830f4b 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 298d6a39a19d535a35ac83a6a7c8114677c067a5752f9a5d15403db29c589ea3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 5c1aa2ed99f2fdae249ce0f61db0c4a6de04c6762d6664a8f5e1639417303dc8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 066a23a7a89a8d7bdf4482e7bf599a0ae1c86e98e60cd7325e29124aefdac5ae 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html e71882ae80e14bbc168285165631695fefe626e51886058852dfc1a7335acd05 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html ce54f04dfa235e7f6603ce58fe7d04de627aadb31a9348aa0a7f960636a31ce8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 07fa6b22e69a8d0e9de01d392e5f44b31c2e8ef8083a34b53be38ebc8e5bdac9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 6cd27a3ff8eb4a103e40fc6d2b606ef1e4d23b178610a5adc0e829446e257726 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html f26c0f463bf74c0d6d34aef78f22476fa0df27706b0ceb9cef35c1e8043438fa 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html b69c29801b73b446b5129f50f2b72c5a5a0c4ff7e146a53d36e2af97bf3f937f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html dfdc7e3430a554ec55311d5480033efd0afd9820303b57a328e0e49a913d4219 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html c827418b088aed7b08e9c8a592e881e1f05614a50d3b80c73a490af6fa5e8b66 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 244e2202da79866f89b8307bd8e3d2ee6395229d7ff613f03e89db6e995d389a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 07341749e3e7467fd3e7433277e98176768db5184773c32c6d93d37a6a36ef4c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 0715e8de32e1dbd5cbd2f3891c87c36886f6960f533837ff559f47292810ee45 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html d348d95dd1fbf01d4076fc652b3eee1da57482c32e0466e383e42c39d50d7c7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 
b4f70e873cb989043f37255e5cda6d8a4351deb37f4cdab40d93ef4a927c3e22 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html bf027e3193fcd97f0bc633ce6e7a0404ac35b169c9ef1b632b8b68c5f4557b04 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html 474e890318fca60fa6008ee6467262bf3c9b6242a1e9e82bbf8d09787de4719d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 0cd03afeb8fe58c53b9b3a0d6b1b6b834aeeeff09cfcc89fb45af07310be7810 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2881158589becf15150c38a405a14c29a76816e936206d25398aead27dd4cbb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 7f3ca313a6d823b94b97d0b621b6e0db67d86b0895cd96086e3498c29e588c56 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 89e80de7faa2818c772cd96b0ae8429c9cf603f38e997aaac730d690707e0339 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html b961bbf9f0097d8872cd24a90d2b6ff29d582f3e21f01016be728e0dba35ef2d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 41f8f3a074ba69b1c2c9562929c8e4f198c38ad4f4441cec47764e8d8f76cecd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html eea469e771c1e12dce72bf9c85bb455a7f5c1325ff28465714eb82324b1a01c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 13e45c7b9338976bee923ce2ac55a57acfbe081b2f408ba865f543594f520676 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html d7f35a7eb23a275c0919c3138d42a03d626e1b55ed362c9dc5c72b2121c22791 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 66cb6e1379624815b27f7d5d61da8ae892d5cc885ad4d57b800714de368bf351 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 1b4fdfde39c38710fbddc1f7c5ea6ec515c18b9a537f977f46015bf16ce05ffc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 213b61f4eda6939e40e435508a6ed9a46ac65014a666b3e75a0141703e694ce9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html cf642b30ae93131f4f1678026c8f8be347d729fa70403708f99b099acb469a33 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 7cea80736a3e5ee5bc6292752c54e7858b0c16c562201b096feb7ce956e5fab0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html a5ef1d0e4582fe7e63ed7a394d92821ea578dc283d54361c7addceb86612f114 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html dac4172e930f5dab54c885ef64041b9b9a7b350902cec084fb23bcb0174ab1e4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 6124c55cea9df76c7691c4a8c31f95ffa74dadc2c4fa6e5ce303f87590546134 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 16320674d0f5f40c71c0fe2873171d914a4736232f6ecf2c777bfd2267d5d335 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 4f161211729a5b6174339c9d57b42e1a53f64f01ac10f0a8a275769db0c53fde 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html abee807a6b06b3961e51e45b8778bd9e397f8feab04a033ea806f00abe243f8c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 2fde1dc34c031f48254707fa5d5019bfb84b927f2b56509ce80f4ed32efc88f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 31ae75fdf385bedccb890ee2050637cb0c8d1fc532fa0eecff70dd2174973567 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html dd1b505edfd9b722737d422edf4ab5ce0e14b5e457ce7c714acc338960ce78dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 096094ad45af955bdec11fb05143fae512b6dc23b7e26f64468dc74604083522 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 8a47bf4109dab07e2d8ad9c0c86ca8275b853dce08589945cbbcfcaeff701f65 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 8a49f142713b880ede7d69c8e6f27ecc4fa257524c64794cfb453e958d0eadc4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html c2d23c4c27921c8e7e006fe31f2176f44af29dc2a6b43a4f13971fba6da4ef1a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html b9b606de3db626778cf3b6b97e356f7ecb20dfd7ae18ccba39bf3121e906ddb4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 
525751f4aa3dec54e40a861e9a0e6bf5bb01069d4cf51b273e594cd3a66d9a23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html bef7344ed5933b1a8c0940aee930705d3cd5cdc33b70bbb0628eb7bb8f508e7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 7b7fd00661b99f20bc92eba82f665d7f8356576bd2753b3b0c01b8da02b3076c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 40d93b503afd277a07956f322709453ff61bc3c3b04dbbf88e112fbae9dc2e70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2d5cfd750fc59110941a0608f746e8e0371689fd5f576193db49e4dadabaafee 2 @@ -16761,36 +16761,36 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 6b8347afada53209080f7957b6874253d7f3cf01e4a5978cb33a04af6e43827b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html d9ec8a6cf1b7953055fc9f38c1cbe872a3165de96b318ecd05b13a1e285a8a99 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 452dedb57a41c6c18a1ae2e5d5c34b903a874a738637cdc222386a9f1f8980b9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 41d2c4e564ae5208df93b105410396f47b4bc9e7264736b9d32692de9b0f1c10 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 10b88292594a5d4111526acbbc54f06abf7924ba7616a67720e3f707c6f0f9cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html fcf9f0b057d017231513874eb27c51e801db1fdf6510d985b718bc5ca7f4f29d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 5305c455e8e5f3e064512e8bed2f6574218bbbfb0e7127c75a701e2b90475630 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 02328a4df1b03cbb609357daf31f3448d35673631b000d3e8e27d621d1dc588c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 544a68ffb21541b9daa0ca22c9ebe4271825e93f4d9fd464cb193d662f7f9318 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 89c1056146d338844aa62b6a98cab7e9924f2918f702b93cd779a7e7ba1a95b5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 
5ef18775217bd64a788173cd9df02a819e3087cd97e17f7175330fc1b6450b33 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 4bd68f4b0101f1dc8896b0ab3bd0785e7d560a3f134488212dade0225bb10b35 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 10bcfa3dd36ba926eaebd7518b6161e4de2f36d0e62fd22d2163c5f449149e5e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html d327a0129b9ee949abb31c05ce3ddb9d1cc26024ac8e82e66e55176640d51434 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html 09ce66a2b372bd01cbf3fe31489d7eed3a71066a9b24022505e283ec1fe8549a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 7e22b7453c6e6b1468e5e420d1bbd58c0550409866c72aa3090e8acabab74e41 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html f3ccaa87b3a43eef0ff5c163ae3d3129f6e8f0851cc52519b1a407cdb0324f5b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html bae818eeaf6e17a076670a0b24d270585fea7242ef2fdddb7c0495c224cff6d3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html a4b204e869abb5bfc52efb2cd77b09bc8c80646f0b95fd96d98c435cc5cfa0de 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 2ae609ed6550c5639341bcf9dd4cc6da601bf532e252f97d5e7fd3a23a27a8c7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html a082817b79183f18cd3a584d4e2979b77043c387f68f78aec6d6157121a279b8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 962895d4c480184929b2681f6d3e42c57998a1c6cbb9d645e3916d1b40a17002 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 23ed0e3abfbdda5b7cee9ca868357e32dd56d0c5d973807a61b432d07b4e1ebb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 49afb66ba3bdde79ba8d2d3699b745264584c2a0fe45a1c843cb3f77f4439673 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 28e7a46ba14450fbf4a009560b8ed0a8838ce15c178c13671f73642676d5ca80 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 95bb72d262bdcf53a42f562ca0cc5d0b3e3bc461c6190fa101b1d4ff862f3028 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html a79d661a270b63cc3996feeadc8c3a3c5ea23cbdebb788d693c04169f90821db 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html 58e0ff4013b14e3d7d8c68a75110b42a4b9cd8a8ccd4daf409cfb4b7bdec0e26 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 62149c77ec4169e883d34486c37bf12dddca2ecd33fa46de787624966d51ff15 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html f9c260ef5567c0e18b09279edcb1a7707f03f23203e5d420d7cb68c6195705d2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html e43076bb6c24a5c0724ef7acd79c4d5d87198399aba5af033a0ad6ee5da35b62 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html a72b267dc9e36d24861fc11da826b817726dd8710354a7a7e2d613722b0c79f1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html 22efb105466708fab51a9a0515a1642f8c25a4900be03c25fc09f3081535b16c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html d2e92967f719aa942429157318edeefe76c5c2fc76d87ac711a4e5cef7b5b17b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 7d5d2b7079e945e38a1316c13b68b63e459b8cace0d6244fd0449273a7cf6aaf 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 601786f5e23a0ca7304bd0885fbac9fba4ca25bcf008d6b90bb535267bfe4af7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 0c58ff8a5ff3782df8f949eb6202f4ba306d5aa4a5222a2b3f75949ea4492f17 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 100bf31aa04f36ce5dc9fe3452423cda26bb9d0c7a3bc7c878527590225aceeb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html dcc98a2358d1ecb898a5aeb641fdaae5ed283f982d24683e0b0efdf52b797b6e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 689e46d61c3207e13421ab9d9bb369061cab9ff7c7a075822d05fc14afea7313 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html bc1c1e3ee389d51bff5aa561b7f90e851b0414e19b385c60cf6e15e07c4fd527 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 
64a4f5bb377f06a6513097db362e8430ba608569e571faf88bb6e64f0743a53f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 8440734a96131d60dcdff0a1649f34ddec6dd6288a9a6166f26254fc5cd647c1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 63adfa1d052e5c43a733a981ddf8cc1c10a6c9123eff99e3ad9820ce94f28751 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html a92ec517cb9e92ac9e50628d2116e58083b21d7bbb731eb0c7c1eb6dd0a27b65 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 11668b6cf66158c786f93b282639d765020aa2485b3678ef5c542ddec2519a31 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html c3e4c6a9a6b42845b0a1a7d7a805f71f8ae3196a72b0ba525ee0253a88b7f282 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 752cac63363906fce155ce88c6565d318a4532194635e6fdcf4e1f2472e0654a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html a6a24ec6d15f27132d38edc9e3d900b35048421a86319a82b54ee03859a264bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html a34913193da8030c1f86a10d1edec4f2ed5f157c4d3a9d2f3e8103b35901dd0f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html c2d9243161d2478e1c641dded4dd511e61f8a1ad99b4e37892d2dc6da7ef3d3a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html b88beaec1f3fda96d738fe74dff5b7b272dbe48c2a9c9ac5093d79df085fd2c7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 2b3e3cea659e65f71b6fe60262074ebdb9cbe37f3776e5f6244999e3f2bebb32 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 071d74efe65b18fac5c1fc88fa107c9dfb7f18fd4cc73aa5945c194742acbff7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 0cd88a1c53c9f52b551f7c0c7c614b167cd4fa30aa37a9d4e02d7a31c04174f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 3eddfedfed082b40861d114dfcc3b1a86792ecdb58c6edeccbea349b4f26efa7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 4908cd247e97a147203cbc6588e4981dd0f1c42f2d7927911b2abd57d94fb76c 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html b6360eff6eddc254758d5deefc7f857d299fdb119d233ea93ae1fefc677be75f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2b76db28b6799adecdd18933c9d896e8e21dcd5597c85913c963d45b8a104249 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 017d8d41fc1bce89163b616a160220dee050ae9606c9eba919e2a9dad188970c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 7e0165a17c592b2b072f5d2b834082b3de805d1ef04a044ae3c34f3ebe2ba7af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 1d4df9d3617904a0e292614008c559651f1da3d5db9b755a5f471d6964bfe4ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html ffd234f86530cdcde827604323a8ca2dcd711ea589b58ccc5182b99e78fe3957 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html abfe45fb7f0c8e347f9f3bfff28124ab98c4d466a5074fe2c784e0e3e9ece462 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 79a3ede7a5d68b621d1a76c228ebe32f606dfad658e82b4225f0288b19f92fe3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html a8153f341f27e18d767c2bc4a119cd9ccc9f633be4fb2006a75b1c9967f9bb47 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html b0026c106fd775fe4f10e0277db625b216f126d6a20afced58ad5cf001973aef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html 23493bc8b65687959ca8017ad3dcba9c19d0412f04a6610f4abb1f8777e919b2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html 2e23a6e4ce6df109533f5066ca6e42b73a46446949e178c8942f2b35c1edbda5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html cac2a1b7cd04fcc96157d52c17f8399c7f9968d2bb333d2bbd66e5f620063ecc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 618dfab243d1abbbd2cec26e4ee9e64bc455a40bd14e8f4526e41c59161e8d2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 52d25c0d80e192efc32712354eb99959268861b05aea509a47fadabb7705b695 2 @@ -16843 +16843 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 2122fe47a2766cf016d64dd644d342b9fb8307c2d9418efc463a0d25545f40a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 77b26d568231a0b293ebaa88c7dc87f4f9bdad9f3fa009e197e552155aa0dc39 2 @@ -16894 +16894 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html ac85651f95f88a5174eea3c3d9ca8a3abd753ffd9e24c3a8a54b3e6c28ed964c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html a273f608c23ea9b1e34d04eaec847bdfbf4074589130c3e5cce8aa647dab596b 2 @@ -16900 +16900 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html f96e95722ffc690a9d25bb3a50a5f9e39613edf9abc696ee263aa19153eda2ab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html bbe00ddfd497f507f27fef49b199bdcecc7d1863c5194182eac04c66f6f4bb4f 2 @@ -17080 +17080 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 71a7937fa755a3f9c9c5af3e5ce6d00152539a1440b877589482790691da3ccf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html e6104763104b92cd44ea7de8b7494ad48babc0f21d5a7c1e79921a30febd4130 2 @@ -17204 +17204 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 766ea5457a230af588c1a6da96ee7f5a7e9e156a54ed9c03b13078bcd378abee 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html b3dc037bedfedbeb9f547ed447949e7bf8d7d6bd81edcbbe3f9e2cb41a35a5e4 2 @@ -17208 +17208 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 64bc20bfbcac0d735c3535304dff737ffb69b94d52508833f3efd9e3e2d33a3a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 54e313c557cde2fdb435cd0bde0f0f28decd1a4bf9e5a489f3f45fe6d754b522 2 @@ -17212 +17212 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html d40d5caee3cf53a378d72aa70d9468898f412c5177a2044cfaceed575ca87e73 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 78a9c0149d896ccf880615924156b934f5f5b046aade7482441338fbc425b77b 2 @@ -17364 +17364 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 2726d726149f3a510d270ee5214f26340698c2801d572bf837ee4da2d10a028b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html b15a74005574db11dd53e9bea36da911661bb9f2ea91cf8a571003760b729e9e 2 @@ -17395 +17395 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 3e93b0e2bf3188126e370e93fccdbb5d99b1790d9a6fb2a06fa52e16a6a5dfd1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 9931571cf04a212d955cbc71a581ace2173fe50fcd08ffc227096657d565ce77 2 @@ -17440 +17440 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html dc4f7e43b6342dce9153048ca2398bf83a0a91521148968bbc5a33588e1876be 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 44dd68aa250f0c8b1323884a60cf2be0f57cc9b26d20bfbeacae0f13b84a0640 2 @@ -18344 +18344 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 09d6b9bad6965ee04f894e44d439185be98b43d2246bc2b7fd334416c2ab076c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html d2847e2db38dcaa5bb3dd99dde7a1d860a34ef59c57a13ae22ca1d9c696129dc 2 @@ -18348 +18348 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html e079f35e92bfb2329ba032001e74074ba0584dadea972685191cb88acbc40dab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html 7a533c77333db39fcaa6208d713922d2dcfb5e722bcbc8122499bd36285d8c0f 2 @@ -18380 
+18380 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 43dd12757b794d6a1f1427a759a32a1892998d291e129e9e195b66a03bc3b3fe 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html ad2afd01579ad02e41ce5e1369a8c198b9d7367c91b4ccf0a20137c389cb00f3 2 @@ -18444 +18444 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 3d62d491c88e9144a55152ddee9f9f007723a84d6d68dd9971edd62e964b1709 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html cd59ae60be146ecb8f9a402203752c406fdb98509d64f0d5faf395663e875106 2 comparing rpmtags comparing RELEASE comparing PROVIDES comparing scripts comparing filelist comparing file checksum creating rename script RPM file checksum differs. Extracting packages /usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-12-27 18:24:52.224759880 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-12-27 18:24:52.228759908 +0000 @@ -116,7 +116,7 @@
Block (linear algebra)

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

-\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
@@ -127,9 +127,9 @@
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
-   \end{eqnarray*} + \end{eqnarray*}" src="form_92.png"/>

-

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

+

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also treated as a unit, which is convenient for example during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes topic. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).

@@ -148,7 +148,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.

-

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

+

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this needs not be so: the DoFRenumbering::component_wise function allows to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above using an extra argument as we would have using the first way of creating the Stokes element with two blocks right away.

More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems topic and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.

@@ -177,14 +177,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the image of coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.

-

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
-   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

+

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n
+   \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation of reference face to the face of the current cell.

Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.

-

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
+

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
if (face->at_boundary())
if (face->center()[0] == -1)
face->set_boundary_id (42);
@@ -257,7 +257,7 @@

Component
-

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems topic.

+

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems topic.

In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the topic on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.

@@ -279,7 +279,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
-Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
+Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.

@@ -321,9 +321,9 @@

Degree of freedom
-

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
-   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

+

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+   V_h$). In other words, all we say here that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf
+   x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

Direction flags
@@ -345,7 +345,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.

-

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

+

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -384,19 +384,19 @@

Generalized support points
-

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

-

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

-

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
+<dd><p class="Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

+

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

+

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi]
     =
     \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i
-   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

-

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

-

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
+   $, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

+

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

+

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi]
     =
     \int_{\hat{K}} \varphi(\hat{\mathbf{x}})
     {\hat{x}_1}^{p_1(i)}
     {\hat{x}_2}^{p_2(i)}
-   $ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

+ $" src="form_124.png"/> in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -506,47 +506,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_126.png"/>

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        MU^n = MU^{n-1} + k_n BU^{n-1},
-     \end{align*} + \end{align*}" src="form_127.png"/>

-

in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

-

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

-

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

-\begin{align*}
+<p> in time step <picture><source srcset=$n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the first time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

+

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

+

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizating by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

+\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
               = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_134.png"/>

by quadrature

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
        |K| w_q,
-     \end{align*} + \end{align*}" src="form_135.png"/>

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

-\begin{align*}
+<picture><source srcset=\begin{align*}
        \varphi_i(\mathbf x_q^K) = \delta_{iq},
-     \end{align*} + \end{align*}" src="form_136.png"/>

and consequently

-\begin{align*}
+<picture><source srcset=\begin{align*}
        (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
-     \end{align*} + \end{align*}" src="form_137.png"/>

-

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

-

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

+

where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

+

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

For an example of where lumped mass matrices play a role, see step-69.

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.), is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible to generate new points when the mesh is refined.

-

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

+

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on a object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

for (auto &cell : triangulation.active_cell_iterators())
if (cell->center()[0] < 0)
cell->set_manifold_id (42);
@@ -557,41 +557,41 @@
Mass matrix

The "mass matrix" is a matrix of the form

-\begin{align*}
+<picture><source srcset=\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
-     \end{align*} + \end{align*}" src="form_126.png"/>

-

possibly with a coefficient inside the integral, and where $\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

-\begin{align*}
+<p> possibly with a coefficient inside the integral, and where <picture><source srcset=$\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

+\begin{align*}
        \rho \frac{\partial^2 u}{\partial t^2}
        -\nabla \cdot C \nabla u = f.
-     \end{align*} /usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-12-27 18:24:52.264760155 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-12-27 18:24:52.268760182 +0000 @@ -345,7 +345,7 @@

-step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
+step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2024-12-21 15:09:27.000000000 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 2024-12-21 15:09:27.000000000 +0000 @@ -34,9 +34,6 @@ \fi \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - $F(u,\nabla u)=0$ \pagebreak @@ -52,12 +49,6 @@ $\cal A$ \pagebreak -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ \pagebreak @@ -161,6 +152,15 @@ $\dfrac{d f(x, y(x))}{d y}$ \pagebreak +$O(\text{dim}^3)$ +\pagebreak + +$u = u - P^{-1} (A u - v)$ +\pagebreak + +$u = u - P^{-T} (A u - v)$ +\pagebreak + $u|_{\partial\Omega}=g$ \pagebreak @@ -340,12 +340,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -739,6 +733,12 @@ $\mathbf F$ \pagebreak +$Q_2$ +\pagebreak + +$p$ +\pagebreak + $(A+k\,B)\,C$ \pagebreak @@ -1474,88 +1474,88 @@ \pagebreak \[ -[x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U], + M u_{n+1} + \theta k F(u_{n+1}) = M u_n - (1-\theta)k F(u_n). \] \pagebreak -$(x_0^L , ..., x_{spacedim-1}^L)$ +$m$ \pagebreak -$(x_0^U , ..., x_{spacedim-1}^U)$ +\[ m = I - \Delta t M. \] \pagebreak -$x_k^L$ +$ Mu+cF(u) $ \pagebreak -$x_k^U$ +$u$ \pagebreak -$(x,z)$ +$c$ \pagebreak -$(z,x)$ +$(1-\theta) \Delta t$ \pagebreak -$x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ +$ \theta \Delta t$ \pagebreak \[ - M u_{n+1} + \theta k F(u_{n+1}) = M u_n - (1-\theta)k F(u_n). +u'(t) \approx +\frac{u(t+h) - +u(t-h)}{2h}. \] \pagebreak -$m$ +\[ +u'(t) \approx +\frac{u(t) - +u(t-h)}{h}. +\] \pagebreak -\[ m = I - \Delta t M. \] +\[ +u'(t) \approx +\frac{u(t-2h) - 8u(t-h) ++ 8u(t+h) - u(t+2h)}{12h}. +\] \pagebreak -$\in [0, 2^{\text{dim}} - 1]$ +\[ +[x_0^L, x_0^U] \times ... 
\times [x_{spacedim-1}^L, x_{spacedim-1}^U], +\] \pagebreak -$\hat{B}$ +$(x_0^L , ..., x_{spacedim-1}^L)$ \pagebreak -$G(B) = \hat{B}$ +$(x_0^U , ..., x_{spacedim-1}^U)$ \pagebreak -$F(\hat{B}) = B$ +$x_k^L$ \pagebreak -$ Mu+cF(u) $ +$x_k^U$ \pagebreak -$u$ +$(x,z)$ \pagebreak -$c$ +$(z,x)$ \pagebreak -$(1-\theta) \Delta t$ +$x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ \pagebreak -$ \theta \Delta t$ +$\in [0, 2^{\text{dim}} - 1]$ \pagebreak -\[ -u'(t) \approx -\frac{u(t+h) - -u(t-h)}{2h}. -\] +$\hat{B}$ \pagebreak -\[ -u'(t) \approx -\frac{u(t) - -u(t-h)}{h}. -\] +$G(B) = \hat{B}$ \pagebreak -\[ -u'(t) \approx -\frac{u(t-2h) - 8u(t-h) -+ 8u(t+h) - u(t+2h)}{12h}. -\] +$F(\hat{B}) = B$ \pagebreak $1/h$ @@ -1828,6 +1828,9 @@ $\lambda \approx 0.54448$ \pagebreak +$\sin(\lambda\omega)+\lambda \sin(\omega)=0$ +\pagebreak + $f : \Omega \rightarrow {\mathbb R}^{n_\text{components}}$ /usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2024-12-21 15:09:27.000000000 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 2024-12-21 15:09:27.000000000 +0000 @@ -36,9 +36,6 @@ \fi \pagestyle{empty} \begin{document} -$O(\text{dim}^3)$ -\pagebreak - $F(u,\nabla u)=0$ \pagebreak @@ -54,12 +51,6 @@ $\cal A$ \pagebreak -$u = u - P^{-1} (A u - v)$ -\pagebreak - -$u = u - P^{-T} (A u - v)$ -\pagebreak - $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ \pagebreak @@ -163,6 +154,15 @@ $\dfrac{d f(x, y(x))}{d y}$ \pagebreak +$O(\text{dim}^3)$ +\pagebreak + +$u = u - P^{-1} (A u - v)$ +\pagebreak + +$u = u - P^{-T} (A u - v)$ +\pagebreak + $u|_{\partial\Omega}=g$ \pagebreak @@ -342,12 +342,6 @@ $J_K$ \pagebreak -$Q_2$ -\pagebreak - -$p$ -\pagebreak - \begin{eqnarray*} \left(\begin{array}{cc} M & B^T \\ B & 0 @@ -741,6 +735,12 @@ $\mathbf F$ \pagebreak +$Q_2$ +\pagebreak + +$p$ +\pagebreak + 
$(A+k\,B)\,C$ \pagebreak @@ -1476,88 +1476,88 @@ \pagebreak \[ -[x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U], + M u_{n+1} + \theta k F(u_{n+1}) = M u_n - (1-\theta)k F(u_n). \] \pagebreak -$(x_0^L , ..., x_{spacedim-1}^L)$ +$m$ \pagebreak -$(x_0^U , ..., x_{spacedim-1}^U)$ +\[ m = I - \Delta t M. \] \pagebreak -$x_k^L$ +$ Mu+cF(u) $ \pagebreak -$x_k^U$ +$u$ \pagebreak -$(x,z)$ +$c$ \pagebreak -$(z,x)$ +$(1-\theta) \Delta t$ \pagebreak -$x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ +$ \theta \Delta t$ \pagebreak \[ - M u_{n+1} + \theta k F(u_{n+1}) = M u_n - (1-\theta)k F(u_n). +u'(t) \approx +\frac{u(t+h) - +u(t-h)}{2h}. \] \pagebreak -$m$ +\[ +u'(t) \approx +\frac{u(t) - +u(t-h)}{h}. +\] \pagebreak -\[ m = I - \Delta t M. \] +\[ +u'(t) \approx +\frac{u(t-2h) - 8u(t-h) ++ 8u(t+h) - u(t+2h)}{12h}. +\] \pagebreak -$\in [0, 2^{\text{dim}} - 1]$ +\[ +[x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U], +\] \pagebreak -$\hat{B}$ +$(x_0^L , ..., x_{spacedim-1}^L)$ \pagebreak -$G(B) = \hat{B}$ +$(x_0^U , ..., x_{spacedim-1}^U)$ \pagebreak -$F(\hat{B}) = B$ +$x_k^L$ \pagebreak -$ Mu+cF(u) $ +$x_k^U$ \pagebreak -$u$ +$(x,z)$ \pagebreak -$c$ +$(z,x)$ \pagebreak -$(1-\theta) \Delta t$ +$x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ \pagebreak -$ \theta \Delta t$ +$\in [0, 2^{\text{dim}} - 1]$ \pagebreak -\[ -u'(t) \approx -\frac{u(t+h) - -u(t-h)}{2h}. -\] +$\hat{B}$ \pagebreak -\[ -u'(t) \approx -\frac{u(t) - -u(t-h)}{h}. -\] +$G(B) = \hat{B}$ \pagebreak -\[ -u'(t) \approx -\frac{u(t-2h) - 8u(t-h) -+ 8u(t+h) - u(t+2h)}{12h}. 
-\] +$F(\hat{B}) = B$ \pagebreak $1/h$ @@ -1830,6 +1830,9 @@ $\lambda \approx 0.54448$ \pagebreak +$\sin(\lambda\omega)+\lambda \sin(\omega)=0$ +\pagebreak + $f : \Omega \rightarrow {\mathbb R}^{n_\text{components}}$ /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-12-27 18:24:52.524761940 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-12-27 18:24:52.528761968 +0000 @@ -708,7 +708,7 @@
  • -

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    +

    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-12-27 18:24:52.556762160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-12-27 18:24:52.564762215 +0000 @@ -514,7 +514,7 @@
  • -

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    +

    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-12-27 18:24:52.596762435 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-12-27 18:24:52.600762462 +0000 @@ -852,7 +852,7 @@

  • -

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    +

    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 2024-12-27 18:24:52.636762709 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 2024-12-27 18:24:52.640762737 +0000 @@ -567,7 +567,7 @@

  • -

    New: The VectorTools::integrate_difference() function can now also compute the $H_\text{div}$ seminorm, using the VectorTools::Hdiv_seminorm argument.
    +

    New: The VectorTools::integrate_difference() function can now also compute the $H_\text{div}$ seminorm, using the VectorTools::Hdiv_seminorm argument.
    (Zhen Tao, Arezou Ghesmati, Wolfgang Bangerth, 2015/04/17)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-12-27 18:24:52.696763122 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-12-27 18:24:52.700763149 +0000 @@ -621,7 +621,7 @@

  • -

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    +

    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    (Martin Kronbichler, 2020/04/07)

  • @@ -1575,7 +1575,7 @@

  • -

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    +

    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-12-27 18:24:52.780763699 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-12-27 18:24:52.788763754 +0000 @@ -388,9 +388,9 @@

    The algorithms used in the implementation of this class are described in some detail in the hp-paper. There is also a significant amount of documentation on how to use this class in the Constraints on degrees of freedom topic.

    Description of constraints

    Each "line" in objects of this class corresponds to one constrained degree of freedom, with the number of the line being i, entered by using add_line() or add_lines(). The entries in this line are pairs of the form (j,aij), which are added by add_entry() or add_entries(). The organization is essentially a SparsityPattern, but with only a few lines containing nonzero elements, and therefore no data wasted on the others. For each line, which has been added by the mechanism above, an elimination of the constrained degree of freedom of the form

    -\[
+<picture><source srcset=\[
  x_i = \sum_j a_{ij} x_j + b_i
-\] +\]" src="form_1653.png"/>

    is performed, where bi is optional and set by set_inhomogeneity(). Thus, if a constraint is formulated for instance as a zero mean value of several degrees of freedom, one of the degrees has to be chosen to be eliminated.

    Note that the constraints are linear in the xi, and that there might be a constant (non-homogeneous) term in the constraint. This is exactly the form we need for hanging node constraints, where we need to constrain one degree of freedom in terms of others. There are other conditions of this form possible, for example for implementing mean value conditions as is done in the step-11 tutorial program. The name of the class stems from the fact that these constraints can be represented in matrix form as X x = b, and this object then describes the matrix X and the vector b. The most frequent way to create/fill objects of this type is using the DoFTools::make_hanging_node_constraints() function. The use of these objects is first explained in step-6.

    @@ -981,27 +981,27 @@

    Add a constraint to this object. This function adds a constraint of the form

    -\[
+<picture><source srcset=\[
  x_i = \sum_{j=1}^n w_j x_{k_j} + b
-\] +\]" src="form_1654.png"/>

    -

    where $i$ is the number of the degree of freedom to be constrained and is provided by the constrained_dofs argument. The weights $w_j$ and indices $k_j$ are provided as pairs in the dependencies argument, and the inhomogeneity $b$ is provided by the last argument.

    +

    where $i$ is the number of the degree of freedom to be constrained and is provided by the constrained_dofs argument. The weights $w_j$ and indices $k_j$ are provided as pairs in the dependencies argument, and the inhomogeneity $b$ is provided by the last argument.

    As an example, if you want to add the constraint

    -\[
+<picture><source srcset=\[
   x_{42} = 0.5 x_{12} + 0.5 x_{36} + 27
-\] +\]" src="form_1656.png"/>

    you would call this function as follows:

    constraints.add_constraint (42, {{12, 0.5}, {36, 0.5}}, 27.0);

    On the other hand, if (as one often wants to) you need a constraint of the kind

    -\[
+<picture><source srcset=\[
   x_{42} = 27
-\] +\]" src="form_1657.png"/>

    you would call this function as follows:

    constraints.add_constraint (42, {}, 27.0);

    If you want to constrain a degree of freedom to zero, i.e., require that

    -\[
+<picture><source srcset=\[
   x_{42} = 0
-\] +\]" src="form_1658.png"/>

    you would call this function as follows:

    constraints.add_constraint (42, {}, 0.0);

    That said, this special case can be achieved in a more obvious way by calling

    constraints.constrain_dof_to_zero (42);
    @@ -1026,9 +1026,9 @@

    Constrain the given degree of freedom to be zero, i.e., require a constraint like

    -\[
+<picture><source srcset=\[
   x_{42} = 0.
-\] +\]" src="form_1659.png"/>

    Calling this function is equivalent to, but more readable than, saying

    constraints.add_constraint (42, {}, 0.0);

    It is not an error to call this function more than once on the same degree of freedom, but it is an error to call this function on a degree of freedom that has previously been constrained to either a different value than zero, or to a linear combination of degrees of freedom via the add_constraint() function.

    @@ -1161,13 +1161,13 @@
    -

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    +

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

    Parameters
    - - - + + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]columnThe index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]weightThe factor $a_{ij}$ that multiplies $x_j$.
    @@ -1228,11 +1228,11 @@
    -

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    +

    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    Parameters
    - - + +
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    [in]constrained_dof_indexThe index $i$ of the degree of freedom that is being constrained.
    [in]valueThe right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    @@ -1260,9 +1260,9 @@

    Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative constrainers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

    -

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
-+ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
-\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    +

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2}
++ \frac{u_4}{2}$. Then, the resolution will be that $u_{13} =
+\frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.

    @@ -1378,7 +1378,7 @@

    This function provides a "view" into a constraint object. Specifically, given a "mask" index set that describes which constraints we are interested in, it returns an AffineConstraints object that contains only those constraints that correspond to degrees of freedom that are listed in the mask, with indices shifted so that they correspond to the position within the mask. This process is the same as how IndexSet::get_view() computes the shifted indices. The function is typically used to extract from an AffineConstraints object corresponding to a DoFHandler only those constraints that correspond to a specific variable (say, to the velocity in a Stokes system) so that the resulting AffineConstraints object can be applied to a single block of a block vector of solutions; in this case, the mask would be the index set of velocity degrees of freedom, as a subset of all degrees of freedom.

    This function can only work if the degrees of freedom selected by the mask are constrained only against other degrees of freedom that are listed in the mask. In the example above, this means that constraints for the selected velocity degrees of freedom are only against other velocity degrees of freedom, but not against any pressure degrees of freedom. If that is not so, an assertion will be triggered.

    -

    A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. As part of this, you will want to apply constraints (using the distribute() function of this class) to only the 2-block vector, but for this you need to obtain an AffineConstraints object that represents only those constraints that correspond to the variables in question, and in the order in which they appear in the 2-block vector rather than in global 4-block vectors. This function allows you to extract such an object corresponding to a subset of constraints by applying a mask to the global constraints object that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom.

    +

    A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. As part of this, you will want to apply constraints (using the distribute() function of this class) to only the 2-block vector, but for this you need to obtain an AffineConstraints object that represents only those constraints that correspond to the variables in question, and in the order in which they appear in the 2-block vector rather than in global 4-block vectors. This function allows you to extract such an object corresponding to a subset of constraints by applying a mask to the global constraints object that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom.

    @@ -1718,9 +1718,9 @@

    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

    -\[
+<picture><source srcset=\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
-\] +\]" src="form_1668.png"/>

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25
    @@ -2298,7 +2298,7 @@

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

    -

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    +

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure from the caller's site. There is no locking mechanism inside this method to prevent data races.
    @@ -2340,7 +2340,7 @@

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

    -

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    +

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    for (unsigned int i=0;i<matrix.m();++i)
    if (constraints.is_constrained(i))
    matrix.diag_element(i) = 1.;
    @@ -2629,7 +2629,7 @@
    -

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    +

    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-12-27 18:24:52.840764111 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-12-27 18:24:52.840764111 +0000 @@ -232,9 +232,9 @@

    For fixed theta, the Crank-Nicolson scheme is the only second order scheme. Nevertheless, further stability may be achieved by choosing theta larger than ½, thereby introducing a first order error term. In order to avoid a loss of convergence order, the adaptive theta scheme can be used, where theta=½+c dt.

    Assume that we want to solve the equation u' + F(u) = 0 with a step size k. A step of the theta scheme can be written as

    -\[
+<picture><source srcset=\[
   M u_{n+1} + \theta k F(u_{n+1})  = M u_n - (1-\theta)k F(u_n).
-\] +\]" src="form_351.png"/>

    Here, M is the mass matrix. We see, that the right hand side amounts to an explicit Euler step with modified step size in weak form (up to inversion of M). The left hand side corresponds to an implicit Euler step with modified step size (right hand side given). Thus, the implementation of the theta scheme will use two Operator objects, one for the explicit, one for the implicit part. Each of these will use its own TimestepData to account for the modified step sizes (and different times if the problem is not autonomous). Note that once the explicit part has been computed, the left hand side actually constitutes a linear or nonlinear system which has to be solved.

    Usage AnyData

    @@ -315,8 +315,8 @@
    }
    size_type n() const
    size_type m() const
    -

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    -\[ m = I - \Delta t M. \] +

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    +\[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    void Explicit::operator()(AnyData &out, const AnyData &in)
    @@ -1156,7 +1156,7 @@

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

    -

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    +

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 415 of file theta_timestepping.h.

    @@ -1184,7 +1184,7 @@

    The operator solving the implicit part of the scheme. It will receive in its input data the vector "Previous time". Information on the timestep should be obtained from implicit_data().

    -

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    +

    Its return value is the solution u of Mu-cF(u)=f, where f is the dual space vector found in the "Previous time" entry of the input data, M the mass matrix, F the operator in space and c is the adjusted time step size $ \theta \Delta t$

    Definition at line 427 of file theta_timestepping.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-12-27 18:24:52.880764385 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-12-27 18:24:52.884764413 +0000 @@ -177,10 +177,10 @@

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

    -

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
+<p>Given one-dimensional polynomials <picture><source srcset=$P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z)
 = P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    -

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    +

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 321 of file tensor_product_polynomials.h.

    Constructor & Destructor Documentation

    @@ -695,7 +695,7 @@
    -

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one-dimensional polynomials for each space direction, given the index i.

    +

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one-dimensional polynomials for each space direction, given the index i.

    Definition at line 713 of file tensor_product_polynomials.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-12-27 18:24:52.920764660 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-12-27 18:24:52.924764688 +0000 @@ -243,14 +243,14 @@

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    -

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    +

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
    SolverControl & solver_control
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    @@ -523,7 +523,7 @@
    -

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    +

    Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-12-27 18:24:52.980765072 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-12-27 18:24:52.988765127 +0000 @@ -1027,7 +1027,7 @@
    -

    Return a reference to the $i$th element of the range represented by the current object.

    +

    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-12-27 18:24:53.036765457 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 2024-12-27 18:24:53.040765484 +0000 @@ -365,27 +365,27 @@

    Names of difference formulas.

    Enumerator
    Euler 

    The symmetric Euler formula of second order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

    UpwindEuler 

    The upwind Euler formula of first order:

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

    FourthOrder 

    The fourth order scheme

    -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-12-27 18:24:53.068765676 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-12-27 18:24:53.076765731 +0000 @@ -166,7 +166,7 @@ (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2). \]" src="form_638.png"/>

    -

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    +

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

    \[
   (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-12-27 18:24:53.108765951 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-12-27 18:24:53.112765978 +0000
@@ -169,8 +169,8 @@
 <a name=

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    -

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    -

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    +

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    +

    As a consequence, matrices which have the same number of rows as each vector (i.e. $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 43 of file qr.h.

    Member Typedef Documentation

    @@ -413,7 +413,7 @@
    -

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -447,7 +447,7 @@
    -

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -481,7 +481,7 @@
    -

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -515,7 +515,7 @@
    -

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-12-27 18:24:53.148766226 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-12-27 18:24:53.156766281 +0000 @@ -223,7 +223,7 @@ void swap (BlockIndices &u, BlockIndices &v) noexcept &#href_anchor"details" id="details">

    Detailed Description

    -

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    +

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.

    See also
    Block (linear algebra)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-12-27 18:24:53.220766720 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-12-27 18:24:53.224766747 +0000 @@ -800,9 +800,9 @@
    LinearOperator< Range, Domain, BlockPayload::BlockType > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, BlockPayload::BlockType > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1698.png"/>

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -843,9 +843,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

    -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1698.png"/>

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -1543,7 +1543,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -1556,60 +1556,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1940.png"/>

    -

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    +

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1945.png"/>

    -

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    -\begin{eqnarray*}
+<p>Assuming that <picture><source srcset=$ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

    +\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1947.png"/>

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

    -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1948.png"/>

    This leads to the result

    -\[
+<picture><source srcset=\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
-\] +\]" src="form_1949.png"/>

    -

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    -

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    -\[
+<p> with <picture><source srcset=$ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    +

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

    +\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
-\] +\]" src="form_1956.png"/>

    A typical set of steps needed the solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. -
    3. Define the Schur complement $ S $ (using schur_complement()).
    4. -
    5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    6. +
    7. Define the Schur complement $ S $ (using schur_complement()).
    8. +
    9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
    10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

      -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1958.png"/>

    11. -
    12. Solve for $ y $ in (5):

      -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

      +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1960.png"/>

    13. Perform the post-processing step from (3) using postprocess_schur_solution():

      -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1961.png"/>

    @@ -1655,10 +1655,10 @@
    LinearOperator< Domain, Range, BlockPayload::BlockType > inverse_operator(const LinearOperator< Range, Domain, BlockPayload::BlockType > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    -

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    -

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
-$ is derived from the mass matrix over this space.

    -

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    +

    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    +

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
+$ is derived from the mass matrix over this space.

    +

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1681,8 +1681,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    -

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
-\text{prec}(D) $, should ideally be computationally inexpensive.

    +

    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
+\text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
    @@ -1705,7 +1705,7 @@
    -

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    +

    Return the number of blocks in a column (i.e, the number of "block rows", or the number $m$, if interpreted as a $m\times n$ block system).

    Definition at line 296 of file block_linear_operator.h.

    @@ -1724,7 +1724,7 @@
    -

    Return the number of blocks in a row (i.e, the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    +

    Return the number of blocks in a row (i.e, the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    Definition at line 302 of file block_linear_operator.h.

    @@ -1743,7 +1743,7 @@
    -

    Access the block with the given coordinates. This std::function object returns a LinearOperator representing the $(i,j)$-th block of the BlockLinearOperator.

    +

    Access the block with the given coordinates. This std::function object returns a LinearOperator representing the $(i,j)$-th block of the BlockLinearOperator.

    Definition at line 309 of file block_linear_operator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-12-27 18:24:53.280767133 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-12-27 18:24:53.284767160 +0000 @@ -1309,7 +1309,7 @@ const BlockVectorType & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1757,7 +1757,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -1881,7 +1881,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-12-27 18:24:53.356767654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-12-27 18:24:53.360767682 +0000 @@ -954,7 +954,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 398 of file block_sparse_matrix.h.

    @@ -1082,7 +1082,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 442 of file block_sparse_matrix.h.

    @@ -2079,7 +2079,7 @@
    -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2627,7 +2627,7 @@
    -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2735,7 +2735,7 @@
    -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-12-27 18:24:53.408768012 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-12-27 18:24:53.412768039 +0000 @@ -767,7 +767,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 370 of file block_sparse_matrix_ez.h.

    @@ -792,7 +792,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 408 of file block_sparse_matrix_ez.h.

    @@ -817,7 +817,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    Definition at line 390 of file block_sparse_matrix_ez.h.

    @@ -842,7 +842,7 @@ const BlockVector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    +

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 428 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-12-27 18:24:53.488768561 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-12-27 18:24:53.492768588 +0000 @@ -1825,7 +1825,7 @@
    -

    $U = U * V$: scalar product.

    +

    $U = U * V$: scalar product.

    @@ -1851,7 +1851,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1903,7 +1903,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1929,7 +1929,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1955,7 +1955,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    @@ -1990,7 +1990,7 @@
    return_value = *this * W;
    void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

    The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

    -

    For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

    +

    For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

    @@ -2237,7 +2237,7 @@
    -

    $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

    +

    $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-12-27 18:24:53.544768945 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-12-27 18:24:53.552769000 +0000 @@ -1257,7 +1257,7 @@
    -

    $U = U * V$: scalar product.

    +

    $U = U * V$: scalar product.

    @@ -1277,7 +1277,7 @@
    -

    Return the square of the $l_2$-norm.

    +

    Return the square of the $l_2$-norm.

    @@ -1317,7 +1317,7 @@
    -

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    +

    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1337,7 +1337,7 @@
    -

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    +

    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1357,7 +1357,7 @@
    -

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    +

    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    @@ -1387,7 +1387,7 @@
    return_value = *this * W;
    void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

    The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

    -

    For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

    +

    For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

    @@ -1587,7 +1587,7 @@
    -

    $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

    +

    $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-12-27 18:24:53.596769302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-12-27 18:24:53.600769330 +0000 @@ -179,11 +179,11 @@ &#href_anchor"details" id="details">

    Detailed Description

    template<int spacedim, typename Number = double>
    class BoundingBox< spacedim, Number >

    A class that represents a box of arbitrary dimension spacedim and with sides parallel to the coordinate axes, that is, a region

    -\[
+<picture><source srcset=\[
 [x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U],
-\] +\]" src="form_362.png"/>

    -

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    +

    where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    Geometrically, a bounding box is thus:

    Bounding boxes are, for example, useful in parallel distributed meshes to give a general description of the owners of each portion of the mesh. More generally, bounding boxes are often used to roughly describe a region of space in which an object is contained; if a candidate point is not within the bounding box (a test that is cheap to execute), then it is not necessary to perform an expensive test whether the candidate point is in fact inside the object itself. Bounding boxes are therefore often used as a first, cheap rejection test before more detailed checks. As such, bounding boxes serve many of the same purposes as the convex hull, for which it is also relatively straightforward to compute whether a point is inside or outside, though not quite as cheap as for the bounding box.

    -

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    +

    Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclicly $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    @@ -744,7 +744,7 @@
    Orthogonal to Cross section coordinates ordered as
    -

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    +

    Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    Definition at line 232 of file bounding_box.cc.

    @@ -812,7 +812,7 @@

    Apply the affine transformation that transforms this BoundingBox to a unit BoundingBox object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    Definition at line 311 of file bounding_box.cc.

    @@ -835,7 +835,7 @@

    Apply the affine transformation that transforms the unit BoundingBox object to this object.

    -

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    +

    If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    Definition at line 326 of file bounding_box.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-12-27 18:24:53.644769632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-12-27 18:24:53.648769659 +0000 @@ -793,7 +793,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 511 of file cuda_sparse_matrix.cc.

    @@ -816,7 +816,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes this transposed matrix.

    Definition at line 529 of file cuda_sparse_matrix.cc.

    @@ -839,7 +839,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    +

    Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    Definition at line 547 of file cuda_sparse_matrix.cc.

    @@ -862,7 +862,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src&#href_anchor"memdoc"> -

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    +

    Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function foes the same as vmult_add() but takes the transposed matrix.

    Definition at line 565 of file cuda_sparse_matrix.cc.

    @@ -884,7 +884,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 583 of file cuda_sparse_matrix.cc.

    @@ -936,8 +936,8 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & b&#href_anchor"memdoc"> -

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    -

    Source $x$ and destination $dst$ must not be the same vector.

    +

    Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

    +

    Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 610 of file cuda_sparse_matrix.cc.

    @@ -959,8 +959,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 625 of file cuda_sparse_matrix.cc.

    @@ -982,8 +982,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$.

    Definition at line 644 of file cuda_sparse_matrix.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-12-27 18:24:53.692769962 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-12-27 18:24:53.696769989 +0000 @@ -206,37 +206,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

    -

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    -\[ F: \mathcal{B} \subset
-R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \] +

    This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

    +\[ F: \mathcal{B} \subset
+R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

    (the push_forward() function) and that admits the inverse transformation

    -\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
-\subset R^{\text{chartdim}} \] +\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B}
+\subset R^{\text{chartdim}} \]

    (the pull_back() function).

    The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

    -\[
-\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \] +\[
+\mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \]

    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.

    Providing function gradients

    -

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    +

    In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

    Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly can not compute anything useful and therefore simply triggers and exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

    -\[
+<picture><source srcset=\[
      F: [0,1] \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1502.png"/>

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

    -\[
+<picture><source srcset=\[
      F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
-  \] + \]" src="form_1503.png"/>

    -

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    +

    in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 907 of file manifold.h.

    Member Typedef Documentation

    @@ -561,7 +561,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -595,24 +595,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -621,11 +621,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-12-27 18:24:53.756770401 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-12-27 18:24:53.764770456 +0000 @@ -1049,7 +1049,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by ChunkSparsityPattern::symmetrize().

    @@ -1380,7 +1380,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    @@ -1454,8 +1454,8 @@
    -

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -1475,8 +1475,8 @@
    -

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    +

    Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann : Numerische Mathematik)

    @@ -2175,7 +2175,7 @@
    -

    Return the location of entry $(i,j)$ within the val array.

    +

    Return the location of entry $(i,j)$ within the val array.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-12-27 18:24:53.820770841 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-12-27 18:24:53.824770869 +0000 @@ -1136,7 +1136,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 519 of file chunk_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-12-27 18:24:53.868771171 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-12-27 18:24:53.876771226 +0000 @@ -579,24 +579,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -605,11 +605,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-12-27 18:24:53.928771583 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-12-27 18:24:53.936771638 +0000 @@ -414,7 +414,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, dim, 3 >.

    @@ -446,7 +446,7 @@
    -

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1111 of file manifold_lib.cc.

    @@ -476,7 +476,7 @@
    -

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    +

    Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1131 of file manifold_lib.cc.

    @@ -726,7 +726,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -756,24 +756,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -782,11 +782,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-12-27 18:24:53.968771857 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-12-27 18:24:53.972771885 +0000 @@ -196,7 +196,7 @@

    As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    • If a solution vector is complex-valued, then this results in at least two input components at each evaluation point. As a consequence, the DataPostprocessor::evaluate_scalar_field() function is never called, even if the underlying finite element had only a single solution component. Instead, DataOut will always call DataPostprocessor::evaluate_vector_field().
    • -
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.
    • +
    • Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-12-27 18:24:54.012772160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-12-27 18:24:54.012772160 +0000 @@ -269,7 +269,7 @@

    These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualizated.

    -

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    +

    In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-12-27 18:24:54.052772434 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-12-27 18:24:54.060772489 +0000 @@ -260,7 +260,7 @@

    In the second image, the background color corresponds to the magnitude of the gradient vector and the vector glyphs to the gradient itself. It may be surprising at first to see that from each vertex, multiple vectors originate, going in different directions. But that is because the solution is only continuous: in general, the gradient is discontinuous across edges, and so the multiple vectors originating from each vertex simply represent the differing gradients of the solution at each adjacent cell.

    -

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    +

    The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    class HeatFluxPostprocessor : public DataPostprocessorVector<dim>
    {
    public:
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-12-27 18:24:54.084772654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-12-27 18:24:54.092772709 +0000 @@ -248,7 +248,7 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 490 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-12-27 18:24:54.112772846 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-12-27 18:24:54.120772901 +0000 @@ -243,7 +243,7 @@
    -

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    +

    Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 629 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-12-27 18:24:54.156773148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-12-27 18:24:54.156773148 +0000 @@ -497,7 +497,7 @@
    -

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    +

    Compute the volume element associated with the jacobian of the transformation $\mathbf F$. That is to say if $DF$ is square, it computes $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -517,7 +517,7 @@
    -

    Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
+<p>Assuming that the current object stores the Jacobian of a mapping <picture><source srcset=$\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf
 F$ is a square matrix (i.e., $\mathbf F:
 {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -634,7 +634,7 @@
    -

    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    +

    One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

    \[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
@@ -768,7 +768,7 @@
   </tr>
 </table>
 </div><div class= -

    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    +

    Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

    \[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-12-27 18:24:54.208773505 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-12-27 18:24:54.212773533 +0000
@@ -573,7 +573,7 @@
   </tr>
 </table>
 </div><div class= -

    Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    +

    Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    \[
   \mathbf{r}(\mathbf{X}) =
 \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
@@ -618,7 +618,7 @@
   </tr>
 </table>
 </div><div class= -

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    +

    Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

    \[
   \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
     =
/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-12-27 18:24:54.244773753 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-12-27 18:24:54.252773807 +0000
@@ -202,7 +202,7 @@
 <p>Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.</p>
 <ul>
 <li>The <b>snapshot</b> stage (the <b>current</b> stage, the <b>consistent</b> stage): In this part of the algorithm, we are at <picture><source srcset=$t = t_n$ and all quantities of the simulation (displacements, strains, temperatures, etc.) are up-to-date for $t = t_n$. In this stage, current time refers to $t_n$, next time refers to $t_{n+1}$, previous time refers to $t_{n-1}$. The other useful notation quantities are the next time step size $t_{n+1} - t_n$ and previous time step size $t_n - t_{n-1}$. In this stage, it is a perfect occasion to generate text output using print commands within the user's code. Additionally, post-processed outputs can be prepared here, which can then later be viewed by visualization programs such as Tecplot, Paraview, and VisIt. Additionally, during the snapshot stage, the code can assess the quality of the previous step and decide whether it wants to increase or decrease the time step size. The step size for the next time step can be modified here, by calling set_desired_next_step_size(). -

  • The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is getting updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one, the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ but other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, requiring that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field and any internal variables, are not synchronized and then get updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

    +
  • The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is getting updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one, the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ but other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, requiring that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field and any internal variables, are not synchronized and then get updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

    The question arises whether time should be incremented before updating state quantities. Multiple possibilities exist, depending on program and formulation requirements, and possibly the programmer's preferences:

    • Time is incremented before the rest of the updates. In this case, even though time is incremented to $t_{n+1}$, not all variables are updated yet. During this update phase, $dt$ equals the previous time step size. Previous means that it is referring to the $dt$ of the advance_time() command that was performed previously. In the following example code, we are assuming that a and b are two state variables that need to be updated in this time step.
      time.advance_time();
      new_a = update_a(a, b, time.get_previous_step_size());
      /usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-12-27 18:24:54.328774329 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-12-27 18:24:54.332774357 +0000 @@ -422,7 +422,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class DoFHandler< dim, spacedim >

      Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

      +class DoFHandler< dim, spacedim >

      Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

      It is first used in the step-2 tutorial program.

      For each 0d, 1d, 2d, and 3d subobject, this class stores a list of the indices of degrees of freedom defined on this DoFHandler. These indices refer to the unconstrained degrees of freedom, i.e. constrained degrees of freedom are numbered in the same way as unconstrained ones, and are only later eliminated. This leads to the fact that indices in global vectors and matrices also refer to all degrees of freedom and some kind of condensation is needed to restrict the systems of equations to the unconstrained degrees of freedom only. The actual layout of storage of the indices is described in the internal::DoFHandlerImplementation::DoFLevel class documentation.

      The class offers iterators to traverse all cells, in much the same way as the Triangulation class does. Using the begin() and end() functions (and companions, like begin_active()), one can obtain iterators to walk over cells, and query the degree of freedom structures as well as the triangulation data. These iterators are built on top of those of the Triangulation class, but offer the additional information on degrees of freedom functionality compared to pure triangulation iterators. The order in which dof iterators are presented by the ++ and -- operators is the same as that for the corresponding iterators traversing the triangulation on which this DoFHandler is constructed.

      @@ -439,7 +439,7 @@

      Like many other classes in deal.II, the DoFHandler class can stream its contents to an archive using BOOST's serialization facilities. The data so stored can later be retrieved again from the archive to restore the contents of this object. This facility is frequently used to save the state of a program to disk for possible later resurrection, often in the context of checkpoint/restart strategies for long running computations or on computers that aren't very reliable (e.g. on very large clusters where individual nodes occasionally fail and then bring down an entire MPI job).

      The model for doing so is similar for the DoFHandler class as it is for the Triangulation class (see the section in the general documentation of that class). In particular, the load() function does not exactly restore the same state as was stored previously using the save() function. Rather, the function assumes that you load data into a DoFHandler object that is already associated with a triangulation that has a content that matches the one that was used when the data was saved. Likewise, the load() function assumes that the current object is already associated with a finite element object that matches the one that was associated with it when data was saved; the latter can be achieved by calling DoFHandler::distribute_dofs() using the same kind of finite element before re-loading data from the serialization archive.

      hp-adaptive finite element methods

      -

      Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cells. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degree of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediate match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

      +

      Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cells. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degree of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediate match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

      The whole process of working with objects of this type is explained in step-27. Many of the algorithms this class implements are described in the hp-paper.

      Active FE indices and their behavior under mesh refinement

      The typical workflow for using this class is to create a mesh, assign an active FE index to every active cell, call DoFHandler::distribute_dofs(), and then assemble a linear system and solve a problem on this finite element space.

      @@ -988,7 +988,7 @@
      -

      Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

      +

      Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

      The exact order in which degrees of freedom on a mesh are ordered, i.e., the order in which basis functions of the finite element space are enumerated, is something that deal.II treats as an implementation detail. By and large, degrees of freedom are enumerated in the same order in which we traverse cells, but you should not rely on any specific numbering. In contrast, if you want a particular ordering, use the functions in namespace DoFRenumbering.

      This function is first discussed in the introduction to the step-2 tutorial program.

      Note
      This function makes a copy of the finite element given as argument, and stores it as a member variable, similarly to the above function set_fe().
      /usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-12-27 18:24:54.380774687 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-12-27 18:24:54.388774742 +0000 @@ -1119,7 +1119,7 @@
      -

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

      +

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

      Definition at line 566 of file dynamic_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-12-27 18:24:54.428775017 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-12-27 18:24:54.432775044 +0000 @@ -204,7 +204,7 @@
      template<typename VectorType = Vector<double>>
      class EigenInverse< VectorType >

      Inverse iteration (Wieland) for eigenvalue computations.

      This class implements an adaptive version of the inverse iteration by Wieland.

      -

      There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

      +

      There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

      Usually, the initial guess entering this method is updated after each step, replacing it with the new approximation of the eigenvalue. Using a parameter AdditionalData::relaxation between 0 and 1, this update can be damped. With relaxation parameter 0, no update is performed. This damping allows for slower adaption of the shift value to make sure that the method converges to the eigenvalue closest to the initial guess. This can be aided by the parameter AdditionalData::start_adaption, which indicates the first iteration step in which the shift value should be adapted.

      Definition at line 128 of file eigen.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-12-27 18:24:54.460775236 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-12-27 18:24:54.468775291 +0000 @@ -203,7 +203,7 @@

      Detailed Description

      template<typename VectorType = Vector<double>>
      class EigenPower< VectorType >

      Power method (von Mises) for eigenvalue computations.

      -

      This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

      +

      This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

      A shift parameter allows to shift the spectrum, so it is possible to compute the smallest eigenvalue, too.

      Convergence of this method is known to be slow.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-12-27 18:24:54.512775593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-12-27 18:24:54.512775593 +0000 @@ -233,15 +233,15 @@
      template<int dim, int spacedim = dim>
      class EllipticalManifold< dim, spacedim >

      Elliptical manifold description derived from ChartManifold. More information on the elliptical coordinate system can be found at Wikipedia .

      This is based on the definition of elliptic coordinates $(u,v)$

      -\[
+<picture><source srcset=\[
  \left\lbrace\begin{aligned}
  x &=  x_0 + c \cosh(u) \cos(v) \\
  y &=  y_0 + c \sinh(u) \sin(v)
  \end{aligned}\right.
-\] +\]" src="form_1530.png"/>

      -

      in which $(x_0,y_0)$ are coordinates of the center of the cartesian system.

      -

      The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

      +

      in which $(x_0,y_0)$ are coordinates of the center of the cartesian system.

      +

      The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

      The constructor of this class will throw an exception if both dim and spacedim are different from two.

      This manifold can be used to produce hyper_shells with elliptical curvature. As an example, the test elliptical_manifold_01 produces the following triangulation:

      @@ -352,7 +352,7 @@ - +
      centerCenter of the manifold.
      major_axis_directionDirection of the major axis of the manifold.
      eccentricityEccentricity of the manifold $e\in\left]0,1\right[$.
      eccentricityEccentricity of the manifold $e\in\left]0,1\right[$.
  • @@ -489,7 +489,7 @@

    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -614,7 +614,7 @@

    Return the periodicity associated with the submanifold.

    -

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    +

    For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1221 of file manifold_lib.cc.

    @@ -830,7 +830,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -862,24 +862,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -888,11 +888,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-12-27 18:24:54.556775896 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-12-27 18:24:54.560775923 +0000 @@ -178,44 +178,44 @@ class FECouplingValues< dim1, dim2, spacedim >

    FECouplingValues is a class that facilitates the integration of finite element data between two different finite element objects, possibly living on different grids, and with possibly different topological dimensions (i.e., cells, faces, edges, and any combination thereof).

    This class provides a way to simplify the implementation of the following abstract operation:

    -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
-\] +\]" src="form_1096.png"/>

    for three different types of Kernels $K$:

      -
    • $K(x_1, x_2)$ is a non-singular Kernel function, for example, it is a function of positive powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
    • -
    • $K(x_1, x_2)$ is a singular Kernel function, for example, it is a function of negative powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
    • -
    • $K(x_1, x_2)$ is a Dirac delta distribution $\delta(x_1-x_2)$, such that the integral above is actually a single integral over the intersection of the two sets $T_1$ and $T_2$.
    • +
    • $K(x_1, x_2)$ is a non-singular Kernel function, for example, it is a function of positive powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
    • +
    • $K(x_1, x_2)$ is a singular Kernel function, for example, it is a function of negative powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
    • +
    • $K(x_1, x_2)$ is a Dirac delta distribution $\delta(x_1-x_2)$, such that the integral above is actually a single integral over the intersection of the two sets $T_1$ and $T_2$.

    For the first case, one may think that the only natural way to proceed is to compute the double integral by simply nesting two loops:

    -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
 \approx \sum_{q_1} \sum_{q_2} K(x_1^{q_1}, x_2^{q_2}) \phi^1_i(x_1^{q_1})
 \phi^2_j(x_2^{q_2}) w_1^{q_1} w_2^{q_2},
-\] +\]" src="form_1100.png"/>

    -

    where $x_1^{q_1}$ and $x_2^{q_2}$ are the quadrature points in $T_1$ and $T_2$ respectively, and $w_1^{q_1}$ and $w_2^{q_2}$ are the corresponding quadrature weights.

    -

    This, however is not the only way to proceed. In fact, such an integral can be rewritten as a single loop over corresponding elements of two arrays of points with the same length that can be thought of as a single quadrature rule on the set $T_1\times T_2$. For singular kernels, for example, this is often the only way to proceed, since the quadrature formula on $T_1\times
-T_2$ is usually not written as a tensor product quadrature formula, and one needs to build a custom quadrature formula for this purpose.

    +

    where $x_1^{q_1}$ and $x_2^{q_2}$ are the quadrature points in $T_1$ and $T_2$ respectively, and $w_1^{q_1}$ and $w_2^{q_2}$ are the corresponding quadrature weights.

    +

    This, however is not the only way to proceed. In fact, such an integral can be rewritten as a single loop over corresponding elements of two arrays of points with the same length that can be thought of as a single quadrature rule on the set $T_1\times T_2$. For singular kernels, for example, this is often the only way to proceed, since the quadrature formula on $T_1\times
+T_2$ is usually not written as a tensor product quadrature formula, and one needs to build a custom quadrature formula for this purpose.

    This class allows one to treat the three cases above in the same way, and to approximate the integral as follows:

    -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
 \approx \sum_{i=1}^{N_q} K(x_1^{i}, x_2^{i}) \phi^1_i(x_1^{i})
 \phi^2_j(x_2^{i}) w_1^{i} w_2^i,
-\] +\]" src="form_1107.png"/>

    -

    Since the triple of objects $(\{q\}, \{w\}, \{\phi\})$ is usually provided by a class derived from the FEValuesBase class, this is the type that the class needs at construction time. $T_1$ and $T_2$ can be two arbitrary cells, faces, or edges belonging to possibly different meshes (or to meshes with different topological dimensions), $\phi^1_i$ and $\phi^2_j$ are basis functions defined on $T_1$ and $T_2$, respectively.

    -

    The case of the Dirac distribution is when $T_1$ and $T_2$ correspond to the common face of two neighboring cells. In this case, this class provides a functionality which is similar to the FEInterfaceValues class, and gives you a way to access values of basis functions on the neighboring cells, as well as their gradients and Hessians, in a unified fashion, on the face.

    +

    Since the triple of objects $(\{q\}, \{w\}, \{\phi\})$ is usually provided by a class derived from the FEValuesBase class, this is the type that the class needs at construction time. $T_1$ and $T_2$ can be two arbitrary cells, faces, or edges belonging to possibly different meshes (or to meshes with different topological dimensions), $\phi^1_i$ and $\phi^2_j$ are basis functions defined on $T_1$ and $T_2$, respectively.

    +

    The case of the Dirac distribution is when $T_1$ and $T_2$ correspond to the common face of two neighboring cells. In this case, this class provides a functionality which is similar to the FEInterfaceValues class, and gives you a way to access values of basis functions on the neighboring cells, as well as their gradients and Hessians, in a unified fashion, on the face.

    Similarly, this class can be used to couple bulk and surface meshes across the faces of the bulk mesh. In this case, the two FEValuesBase objects will have different topological dimension (i.e., one will be a cell in a co-dimension one triangulation, and the other a face of a bulk grid with co-dimension zero), and the QuadratureCouplingType argument is usually chosen to be QuadratureCouplingType::reorder, since the quadrature points of the two different FEValuesBase objects are not necessarily generated with the same ordering.

    The type of integral to compute is controlled by the QuadratureCouplingType argument (see the documentation of that enum class for more details), while the type degrees of freedom coupling is controlled by the DoFCouplingType argument (see the documentation of that enum class for more details).

    As an example usage of this class, consider the a bilinear form of the form:

    -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K_1(x_1, x_2) v_i(x_1) u_j(x_2) dT_1 dT_2 +
 \int_{T_1} \int{T_2} K_2(x_1, x_2) p_i(x_1) q_j(x_2) dT_1 dT_2
-\] +\]" src="form_1111.png"/>

    -

    where the finite dimensional space has two scalar components. We indicate with $v_i$ and $p_i$ the trial functions, and with $u_j$ and $q_j$ the corresponding test functions. $K_1$ and $K_2$ are coupling kernels: such a formulation is used, for example, to write the bilinear forms of Galerkin boundary element methods.

    +

    where the finite dimensional space has two scalar components. We indicate with $v_i$ and $p_i$ the trial functions, and with $u_j$ and $q_j$ the corresponding test functions. $K_1$ and $K_2$ are coupling kernels: such a formulation is used, for example, to write the bilinear forms of Galerkin boundary element methods.

    The corresponding implementation would look like the following:

    ... // double loop over cells that yields cell_1 and cell_2
    @@ -338,9 +338,9 @@

    Construct the FECouplingValues with two arbitrary FEValuesBase objects. This class assumes that the FEValuesBase objects that are given at construction time are initialized and ready to use (i.e., that you have called the reinit() function on them before calling this constructor).

    Notice that the actual renumbering of the degrees of freedom and quadrature points is done at construction time, or upon calling the reinit() function. If you change the underlying FEValuesBase objects after construction, you must call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

    -\[
+<picture><source srcset=\[
 \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) dx dy
-\] +\]" src="form_1116.png"/>

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

    For cell/surface coupling, the same cell may couple with different faces, so the renumbering must be really computed from scratch for each pair of FEValuesBase objects, so reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (o constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_1.reinit(T).

    @@ -390,9 +390,9 @@

    Reinitialize the FECouplingValues with two arbitrary FEValuesBase objects. The FEValuesBase objects must be initialized and ready to use, i.e., you must have called the reinit() function on them before calling this method.

    This method computes the actual renumbering of the degrees of freedom and quadrature points. If you change the underlying FEValuesBase objects after calling this method, you may need to call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

    -\[
+<picture><source srcset=\[
 \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) dx dy
-\] +\]" src="form_1116.png"/>

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

    For cell/surface coupling, the same cell may couple with different faces, so the renumbering must be really computed from scratch for each pair of FEValuesBase objects, so reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (o constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_1.reinit(T).

    @@ -489,8 +489,8 @@
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    Return the two quadrature points in real space at the given quadrature point index, corresponding to a quadrature point in the set $T_1\times
-T_2$.

    +

    Return the two quadrature points in real space at the given quadrature point index, corresponding to a quadrature point in the set $T_1\times
+T_2$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_quadrature_points flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-12-27 18:24:54.668776665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-12-27 18:24:54.676776720 +0000 @@ -463,7 +463,7 @@
    VectorizedArray
    Definition vectorization.h:445
    EvaluationFlags::values
    @ values
    Definition evaluation_flags.h:50

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.

    -

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    +

    The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    FEEvaluation<dim,fe_degree> fe_eval(matrix_free);
    Function<dim> &function = ...;
    for (unsigned int cell_index = cell_range.first;
    @@ -1789,8 +1789,8 @@
    -

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$.

    +

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -2114,7 +2114,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -2486,8 +2486,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-12-27 18:24:54.772777379 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-12-27 18:24:54.776777406 +0000 @@ -1131,8 +1131,8 @@
    -

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$.

    +

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -1402,7 +1402,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -1801,8 +1801,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-12-27 18:24:54.844777873 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-12-27 18:24:54.852777928 +0000 @@ -785,8 +785,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-12-27 18:24:54.956778642 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-12-27 18:24:54.960778669 +0000 @@ -1717,8 +1717,8 @@
    -

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
-x_q)$.

    +

    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients) the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf
+x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -2078,7 +2078,7 @@
    -

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    +

    Return the curl of the vector field, $\nabla \times v$ interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -2472,8 +2472,8 @@
    -

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
-T} \hat{\nabla} u_h$.

    +

    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping between the unit to the real cell defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm
+T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-12-27 18:24:55.136779879 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-12-27 18:24:55.144779934 +0000 @@ -963,7 +963,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -1004,7 +1004,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -1043,7 +1043,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -1251,11 +1251,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 716 of file fe_values_base.cc.

    @@ -1289,7 +1289,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 763 of file fe_values_base.cc.

    @@ -1466,11 +1466,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1506,7 +1506,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 898 of file fe_values_base.cc.

    @@ -1625,11 +1625,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1670,7 +1670,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1001 of file fe_values_base.cc.

    @@ -1789,11 +1789,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1831,7 +1831,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1994,11 +1994,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2039,7 +2039,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1234 of file fe_values_base.cc.

    @@ -2376,7 +2376,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2433,7 +2433,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2491,7 +2491,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2549,7 +2549,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2607,7 +2607,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2665,8 +2665,8 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
-(J_{kK})^{-1}(J_{lL})^{-1}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
+(J_{kK})^{-1}(J_{lL})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-12-27 18:24:55.244780620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-12-27 18:24:55.248780648 +0000 @@ -667,7 +667,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_pointNumber of the quadrature point at which function is to be evaluated
    @@ -706,7 +706,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    componentvector component to be evaluated.
    @@ -743,7 +743,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    - +
    iNumber of the shape function $\varphi_i$ to be evaluated.
    iNumber of the shape function $\varphi_i$ to be evaluated.
    q_pointNumber of the quadrature point at which function is to be evaluated.
    @@ -937,11 +937,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    -
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 487 of file fe_values_base.cc.

    @@ -971,7 +971,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 505 of file fe_values_base.cc.

    @@ -1132,11 +1132,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    +
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1168,7 +1168,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 683 of file fe_values_base.cc.

    @@ -1275,11 +1275,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    +
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1316,7 +1316,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 786 of file fe_values_base.cc.

    @@ -1423,11 +1423,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    -
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1461,7 +1461,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    +
    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1608,11 +1608,11 @@
    Parameters
    - +
    [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    -
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    +
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1649,7 +1649,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    -
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    +
    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1006 of file fe_values_base.cc.

    @@ -1964,7 +1964,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    -

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    +

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2017,7 +2017,7 @@
    -

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    +

    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2071,7 +2071,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2125,7 +2125,7 @@
    -

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    +

    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2179,7 +2179,7 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2233,8 +2233,8 @@
    -

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
-(J_{kK})^{-1}(J_{lL})^{-1}$.

    +

    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
+(J_{kK})^{-1}(J_{lL})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-12-27 18:24:55.320781142 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-12-27 18:24:55.324781170 +0000 @@ -496,9 +496,9 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection have multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • -
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
      -
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
    • -
    • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
    • +
    • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
        +
      • If this function returns that one of the two elements in question is dominated by the other, then presumably it is "larger" one and we take the quadrature formula and mapping that corresponds to this "larger" element is. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
      • +
      • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
    • @@ -834,7 +834,7 @@

      Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

      -

      You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

      +

      You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1104,9 +1104,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

      -

      If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

      +

      If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
      @@ -1135,9 +1135,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
-\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

      +

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} -
+\nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -1166,9 +1166,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
-u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

      +

      Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2
+u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
      @@ -1197,9 +1197,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

      +

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
      @@ -1228,9 +1228,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
-\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

      +

      Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} +
+\frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -1259,9 +1259,9 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
-u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

      +

      Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla
+u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
      @@ -1290,10 +1290,10 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian   <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      -

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

      +u_{\text{cell1}}$" src="form_1170.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      +

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-12-27 18:24:55.380781554 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-12-27 18:24:55.372781499 +0000 @@ -475,7 +475,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      +

      Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
      @@ -499,7 +499,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      +

      Return the jump of the gradient $\jump{nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -523,8 +523,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
      @@ -548,8 +548,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
      @@ -573,7 +573,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      +

      Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -597,7 +597,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      +

      Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
      @@ -621,9 +621,9 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian   <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +u_{\text{cell1}}$" src="form_1170.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
      @@ -659,7 +659,7 @@

      Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -732,7 +732,7 @@

      Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -786,7 +786,7 @@

      Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -840,7 +840,7 @@

      Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -894,7 +894,7 @@

      Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -948,7 +948,7 @@

      Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1002,7 +1002,7 @@

      Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1056,7 +1056,7 @@

      Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-12-27 18:24:55.432781911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-12-27 18:24:55.440781966 +0000 @@ -480,7 +480,7 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      +

      Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
      @@ -504,8 +504,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      +

      Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -551,8 +551,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
-- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}}
+- \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
      @@ -598,8 +598,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
-u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3
+u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
      @@ -623,8 +623,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
-+ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      +

      Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1}
++ \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -648,8 +648,8 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average of the gradient (a tensor of rank 2) $\average{\nabla
-\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      +

      Return the average of the gradient (a tensor of rank 2) $\average{\nabla
+\mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
      @@ -673,9 +673,9 @@ const unsigned int q_point&#href_anchor"memdoc"> -

      Return the average of the Hessian $\average{\nabla^2 u} =
+<p>Return the average of the Hessian   <picture><source srcset=$\average{\nabla^2 u} =
 \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2
-u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      +u_{\text{cell1}}$" src="form_1170.png"/> on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
      @@ -733,7 +733,7 @@

      Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface is used.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -806,7 +806,7 @@

      Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -860,7 +860,7 @@

      Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -914,7 +914,7 @@

      Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -968,7 +968,7 @@

      Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1022,7 +1022,7 @@

      Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1076,7 +1076,7 @@

      Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1130,7 +1130,7 @@

      Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-12-27 18:24:55.616783175 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-12-27 18:24:55.624783230 +0000 @@ -212,25 +212,25 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FESeries::Fourier< dim, spacedim >

      A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

      -\[
+class FESeries::Fourier< dim, spacedim ></div><p>A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into <a class=Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

      +\[
   \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
-\] +\]" src="form_1252.png"/>

      -

      Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

      +

      Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

      The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

      -\[
+<picture><source srcset=\[
    u({\bf x})
    = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
-\] +\]" src="form_1254.png"/>

      From the orthogonality property of the basis, it follows that

      -\[
+<picture><source srcset=\[
    c_{\bf k} =
    \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
-\] +\]" src="form_1255.png"/>

      -

      It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

      +

      It is this complex-valued expansion coefficients, that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $ .

      Definition at line 89 of file fe_series.h.

      Member Typedef Documentation

      @@ -835,7 +835,7 @@
      -

      Angular frequencies $ 2 \pi {\bf k} $ .

      +

      Angular frequencies $ 2 \pi {\bf k} $ .

      Definition at line 195 of file fe_series.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-12-27 18:24:55.660783477 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-12-27 18:24:55.664783505 +0000 @@ -209,39 +209,39 @@
      template<int dim, int spacedim = dim>
      class FESeries::Legendre< dim, spacedim >

      A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into series of Legendre functions on a reference element.

      Legendre functions are solutions to Legendre's differential equation

      -\[
+<picture><source srcset=\[
    \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) +
    n[n+1] P_n(x) = 0
-\] +\]" src="form_1261.png"/>

      and can be expressed using Rodrigues' formula

      -\[
+<picture><source srcset=\[
    P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
-\] +\]" src="form_1262.png"/>

      -

      These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

      -\[
+<p> These polynomials are orthogonal with respect to the <picture><source srcset=$ L^2 $ inner product on the interval $ [-1;1] $

      +\[
    \int_{-1}^1 P_m(x) P_n(x) = \frac{2}{2n + 1} \delta_{mn}
-\] +\]" src="form_1265.png"/>

      -

      and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

      -\[
+<p> and are complete. A family of <picture><source srcset=$ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

      +\[
    \widetilde P_m = \sqrt{2} P_m(2x-1).
-\] +\]" src="form_1267.png"/>

      -

      An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

      -\[
+<p>An arbitrary scalar FE field on the reference element <picture><source srcset=$ [0;1] $ can be expanded in the complete orthogonal basis as

      +\[
    u(x)
    = \sum_{m} c_m \widetilde P_{m}(x).
-\] +\]" src="form_1268.png"/>

      From the orthogonality property of the basis, it follows that

      -\[
+<picture><source srcset=\[
    c_m = \frac{2m+1}{2}
    \int_0^1 u(x) \widetilde P_m(x) dx .
-\] +\]" src="form_1269.png"/>

      -

      This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

      +

      This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using tensor product rule.

      Definition at line 259 of file fe_series.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-12-27 18:24:55.768784219 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-12-27 18:24:55.772784247 +0000 @@ -991,7 +991,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      q_pointNumber of the quadrature point at which function is to be evaluated
      @@ -1032,7 +1032,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      componentvector component to be evaluated.
      @@ -1071,7 +1071,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      @@ -1279,11 +1279,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      -
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 716 of file fe_values_base.cc.

      @@ -1317,7 +1317,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 763 of file fe_values_base.cc.

      @@ -1494,11 +1494,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      +
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1534,7 +1534,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 898 of file fe_values_base.cc.

      @@ -1653,11 +1653,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      +
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1698,7 +1698,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1001 of file fe_values_base.cc.

      @@ -1817,11 +1817,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      -
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1859,7 +1859,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2022,11 +2022,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      +
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2067,7 +2067,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1234 of file fe_values_base.cc.

      @@ -2404,7 +2404,7 @@

      Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

      -

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      +

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2461,7 +2461,7 @@
      -

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      +

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2519,7 +2519,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2577,7 +2577,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2635,7 +2635,7 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2693,8 +2693,8 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
-(J_{kK})^{-1}(J_{lL})^{-1}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
+(J_{kK})^{-1}(J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-12-27 18:24:55.936785373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-12-27 18:24:55.936785373 +0000 @@ -520,11 +520,11 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FESystem< dim, spacedim >

      This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

      FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
      +class FESystem< dim, spacedim >

      This class provides an interface to group several elements together into one, vector-valued element. As example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: There, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

      FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
      FE_Q<dim>(1)); // pressure component
      Definition fe_q.h:554
      -

      The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

      +

      The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

      Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

      FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);

      where now each (vector) component of the combined element corresponds to a $Q_1$ space.

      To the outside world, FESystem objects look just like a usual finite element object, they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.

      @@ -3817,7 +3817,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3919,7 +3919,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4123,8 +4123,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-12-27 18:24:56.044786114 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-12-27 18:24:56.044786114 +0000 @@ -761,7 +761,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      q_pointNumber of the quadrature point at which function is to be evaluated
      @@ -800,7 +800,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      componentvector component to be evaluated.
      @@ -837,7 +837,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      @@ -1031,11 +1031,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      -
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 487 of file fe_values_base.cc.

      @@ -1065,7 +1065,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 505 of file fe_values_base.cc.

      @@ -1226,11 +1226,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      +
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1262,7 +1262,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 683 of file fe_values_base.cc.

      @@ -1369,11 +1369,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      +
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1410,7 +1410,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 786 of file fe_values_base.cc.

      @@ -1517,11 +1517,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      -
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1555,7 +1555,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1702,11 +1702,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      +
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1743,7 +1743,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1006 of file fe_values_base.cc.

      @@ -2058,7 +2058,7 @@

      Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

      -

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      +

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2111,7 +2111,7 @@
      -

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      +

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2165,7 +2165,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2219,7 +2219,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2273,7 +2273,7 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2327,8 +2327,8 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
-(J_{kK})^{-1}(J_{lL})^{-1}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
+(J_{kK})^{-1}(J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-12-27 18:24:56.144786801 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-12-27 18:24:56.148786828 +0000 @@ -650,7 +650,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      q_pointNumber of the quadrature point at which function is to be evaluated
      @@ -684,7 +684,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      componentvector component to be evaluated.
      @@ -716,7 +716,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      @@ -882,11 +882,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      -
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 716 of file fe_values_base.cc.

      @@ -913,7 +913,7 @@ std::vector< Vector< Number > > & values&#href_anchor"memdoc">

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 763 of file fe_values_base.cc.

      @@ -1061,11 +1061,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      +
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1094,7 +1094,7 @@ std::vector< std::vector< Tensor< 1, spacedim, Number > > > & gradients&#href_anchor"memdoc">

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 898 of file fe_values_base.cc.

      @@ -1192,11 +1192,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      +
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1230,7 +1230,7 @@ const bool quadrature_points_fastest = false&#href_anchor"memdoc">

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1001 of file fe_values_base.cc.

      @@ -1328,11 +1328,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      -
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1363,7 +1363,7 @@ std::vector< Vector< Number > > & laplacians&#href_anchor"memdoc">

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1497,11 +1497,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      +
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1535,7 +1535,7 @@ const bool quadrature_points_fastest = false&#href_anchor"memdoc">

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1234 of file fe_values_base.cc.

      @@ -1800,7 +1800,7 @@

      Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

      -

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      +

      You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1841,7 +1841,7 @@
      -

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      +

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1883,7 +1883,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1925,7 +1925,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1967,7 +1967,7 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2009,8 +2009,8 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
-(J_{kK})^{-1}(J_{lL})^{-1}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1}
+(J_{kK})^{-1}(J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-12-27 18:24:56.184787075 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-12-27 18:24:56.192787130 +0000 @@ -377,7 +377,7 @@

      Return the values of the underlying view characterized by fe_function at the renumbered quadrature points.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected view.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -446,7 +446,7 @@

      Return the gradients of the underlying view characterized by fe_function at the renumbered quadrature points.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected view.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., value_type) times the type used to store the gradients of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., value_type) times the type used to store the gradients of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-12-27 18:24:56.228787378 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-12-27 18:24:56.236787433 +0000 @@ -708,7 +708,7 @@

      Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 361 of file fe_values_views.cc.

      @@ -781,7 +781,7 @@

      Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 412 of file fe_values_views.cc.

      @@ -840,7 +840,7 @@

      Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 464 of file fe_values_views.cc.

      @@ -899,7 +899,7 @@

      Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

      This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 516 of file fe_values_views.cc.

      @@ -958,7 +958,7 @@

      Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 568 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.268787653 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.272787680 +0000 @@ -168,9 +168,9 @@

      Detailed Description

      template<int dim, int spacedim>
      class FEValuesViews::SymmetricTensor< 2, dim, spacedim >

      A class representing a view to a set of (dim*dim + dim)/2 components forming a symmetric second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

      -

      This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
-i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
-\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

      +

      This class allows to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le
+i<\text{dim}$, which due to the symmetry of the tensor is also $d_i =
+\sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, it due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

      You get an object of this type if you apply a FEValuesExtractors::SymmetricTensor to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 1315 of file fe_values_views.h.

      @@ -497,7 +497,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1049 of file fe_values_views.cc.

      @@ -571,7 +571,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      See the general discussion of this class for a definition of the divergence.

      -

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1099 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.304787900 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.304787900 +0000 @@ -181,8 +181,8 @@

      Detailed Description

      template<int dim, int spacedim>
      class FEValuesViews::Tensor< 2, dim, spacedim >

      A class representing a view to a set of dim*dim components forming a second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

      -

      This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
-\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

      +

      This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
+\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

      You get an object of this type if you apply a FEValuesExtractors::Tensor to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 1624 of file fe_values_views.h.

      @@ -603,7 +603,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1153 of file fe_values_views.cc.

      @@ -677,7 +677,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      See the general discussion of this class for a definition of the divergence.

      -

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1203 of file fe_values_views.cc.

      @@ -736,7 +736,7 @@

      Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      See the general discussion of this class for a definition of the gradient.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1256 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-12-27 18:24:56.352788230 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-12-27 18:24:56.356788257 +0000 @@ -236,8 +236,8 @@
      template<int dim, int spacedim = dim>
      class FEValuesViews::Vector< dim, spacedim >

      A class representing a view to a set of spacedim components forming a vector part of a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

      Note that in the current context, a vector is meant in the sense physics uses it: it has spacedim components that behave in specific ways under coordinate system transformations. Examples include velocity or displacement fields. This is opposed to how mathematics uses the word "vector" (and how we use this word in other contexts in the library, for example in the Vector class), where it really stands for a collection of numbers. An example of this latter use of the word could be the set of concentrations of chemical species in a flame; however, these are really just a collection of scalar variables, since they do not change if the coordinate system is rotated, unlike the components of a velocity vector, and consequently, this class should not be used for this context.

      -

      This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
-\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

      +

      This class allows to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
+\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

      You get an object of this type if you apply a FEValuesExtractors::Vector to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 597 of file fe_values_views.h.

      @@ -820,7 +820,7 @@ const unsigned int q_point&#href_anchor"memdoc">

      Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

      The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
-(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

      +(\nabla \phi_i(x_q))^T]$" src="form_1335.png"/>, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

      Note
      The meaning of the arguments is as documented for the value() function.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -958,7 +958,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 622 of file fe_values_views.cc.

      @@ -1031,7 +1031,7 @@

      Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 672 of file fe_values_views.cc.

      @@ -1092,7 +1092,7 @@

      The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
 v^T)$.

      Note
      There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
      -

      The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 724 of file fe_values_views.cc.

      @@ -1151,7 +1151,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      -

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 778 of file fe_values_views.cc.

      @@ -1210,7 +1210,7 @@

      Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      -

      The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 831 of file fe_values_views.cc.

      @@ -1269,7 +1269,7 @@

      Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 883 of file fe_values_views.cc.

      @@ -1328,7 +1328,7 @@

      Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

      This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 935 of file fe_values_views.cc.

      @@ -1387,7 +1387,7 @@

      Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 995 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-12-27 18:24:56.496789218 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-12-27 18:24:56.508789301 +0000 @@ -764,11 +764,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3374,7 +3374,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3476,7 +3476,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3766,8 +3766,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-12-27 18:24:56.664790372 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-12-27 18:24:56.668790400 +0000 @@ -740,11 +740,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3312,7 +3312,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3414,7 +3414,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3704,8 +3704,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-12-27 18:24:56.808791361 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-12-27 18:24:56.816791416 +0000 @@ -740,11 +740,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3282,7 +3282,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3384,7 +3384,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3674,8 +3674,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-12-27 18:24:56.960792405 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-12-27 18:24:56.968792460 +0000 @@ -2384,17 +2384,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2427,21 +2427,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3518,7 +3518,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3620,7 +3620,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3881,8 +3881,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3916,11 +3916,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-12-27 18:24:57.108793421 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-12-27 18:24:57.112793449 +0000 @@ -3156,7 +3156,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-12-27 18:24:57.264794493 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-12-27 18:24:57.260794465 +0000 @@ -3156,7 +3156,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-12-27 18:24:57.404795454 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-12-27 18:24:57.408795481 +0000 @@ -491,24 +491,24 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class FE_DGP< dim, spacedim >

      Discontinuous finite elements based on Legendre polynomials.

      -

      This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

      -

      The basis functions used in this element for the space described above are chosen to form a Legendre basis on the unit square, i.e., in particular they are $L_2$-orthogonal and normalized on the reference cell (but not necessarily on the real cell). As a consequence, the first basis function of this element is always the function that is constant and equal to one, regardless of the polynomial degree of the element. In addition, as a result of the orthogonality of the basis functions, the mass matrix is diagonal if the grid cells are parallelograms. Note that this is in contrast to the FE_DGPMonomial class that actually uses the monomial basis listed above as basis functions, without transformation from reference to real cell.

      +

      This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

      +

      The basis functions used in this element for the space described above are chosen to form a Legendre basis on the unit square, i.e., in particular they are $L_2$-orthogonal and normalized on the reference cell (but not necessarily on the real cell). As a consequence, the first basis function of this element is always the function that is constant and equal to one, regardless of the polynomial degree of the element. In addition, as a result of the orthogonality of the basis functions, the mass matrix is diagonal if the grid cells are parallelograms. Note that this is in contrast to the FE_DGPMonomial class that actually uses the monomial basis listed above as basis functions, without transformation from reference to real cell.

      The shape functions are defined in the class PolynomialSpace. The polynomials used inside PolynomialSpace are Polynomials::Legendre up to degree p given in FE_DGP. For the ordering of the basis functions, refer to PolynomialSpace, remembering that the Legendre polynomials are ordered by ascending degree.

      Note
      This element is not defined by finding shape functions within the given function space that interpolate a particular set of points. Consequently, there are no support points to which a given function could be interpolated; finding a finite element function that approximates a given function is therefore only possible through projection, rather than interpolation. Secondly, the shape functions of this element do not jointly add up to one. As a consequence of this, adding or subtracting a constant value – such as one would do to make a function have mean value zero – can not be done by simply subtracting the constant value from each degree of freedom. Rather, one needs to use the fact that the first basis function is constant equal to one and simply subtract the constant from the value of the degree of freedom corresponding to this first shape function on each cell.
      This class is only partially implemented for the codimension one case (spacedim != dim ), since no passage of information between meshes of different refinement level is possible because the embedding and projection matrices are not computed in the class constructor.

      Transformation properties

      -

      It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

      -

      This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

      +

      It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

      +

      This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

      -

      For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
-y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

      -

      For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      +

      For this cell, a bilinear transformation $F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

      +

      For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

      -

      $P_0$ element

      +

      $P_0$ element

      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      -
      @@ -517,11 +517,11 @@

      $P_0$ element, shape function 0

      +

      $P_0$ element, shape function 0

      -

      $P_1$ element

      +

      $P_1$ element

      - @@ -545,11 +545,11 @@

      -
      @@ -533,9 +533,9 @@

      $P_1$ element, shape function 0

      +

      $P_1$ element, shape function 0

      -

      $P_1$ element, shape function 1

      +

      $P_1$ element, shape function 1

      $P_1$ element, shape function 2

      +

      $P_1$ element, shape function 2

      -

      $P_2$ element

      +

      $P_2$ element

      - @@ -576,9 +576,9 @@ - @@ -591,11 +591,11 @@ - +
      @@ -561,9 +561,9 @@

      $P_2$ element, shape function 0

      +

      $P_2$ element, shape function 0

      -

      $P_2$ element, shape function 1

      +

      $P_2$ element, shape function 1

      $P_2$ element, shape function 2

      +

      $P_2$ element, shape function 2

      -

      $P_2$ element, shape function 3

      +

      $P_2$ element, shape function 3

      $P_2$ element, shape function 4

      +

      $P_2$ element, shape function 4

      -
      $P_2$ element, shape function 5
      $P_2$ element, shape function 5
      -

      $P_3$ element

      +

      $P_3$ element

      - @@ -622,9 +622,9 @@ - @@ -637,9 +637,9 @@ - @@ -652,9 +652,9 @@ - @@ -667,11 +667,11 @@ - +
      @@ -607,9 +607,9 @@

      $P_3$ element, shape function 0

      +

      $P_3$ element, shape function 0

      -

      $P_3$ element, shape function 1

      +

      $P_3$ element, shape function 1

      $P_3$ element, shape function 2

      +

      $P_3$ element, shape function 2

      -

      $P_3$ element, shape function 3

      +

      $P_3$ element, shape function 3

      $P_3$ element, shape function 4

      +

      $P_3$ element, shape function 4

      -

      $P_3$ element, shape function 5

      +

      $P_3$ element, shape function 5

      $P_3$ element, shape function 6

      +

      $P_3$ element, shape function 6

      -

      $P_3$ element, shape function 7

      +

      $P_3$ element, shape function 7

      $P_3$ element, shape function 8

      +

      $P_3$ element, shape function 8

      -
      $P_3$ element, shape function 9
      $P_3$ element, shape function 9
      -

      $P_4$ element

      +

      $P_4$ element

      - @@ -698,9 +698,9 @@ -
      @@ -683,9 +683,9 @@

      $P_4$ element, shape function 0

      +

      $P_4$ element, shape function 0

      -

      $P_4$ element, shape function 1

      +

      $P_4$ element, shape function 1

      $P_4$ element, shape function 2

      +

      $P_4$ element, shape function 2

      -

      $P_4$ element, shape function 3

      +

      $P_4$ element, shape function 3

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-12-27 18:24:57.556796498 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-12-27 18:24:57.564796553 +0000 @@ -504,21 +504,21 @@

      Detailed Description

      template<int dim>
      class FE_DGPMonomial< dim >

      Discontinuous finite elements based on monomials.

      -

      This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

      +

      This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

      The basis functions for this element are chosen to be the monomials listed above. Note that this is the main difference to the FE_DGP class that uses a set of polynomials of complete degree p that form a Legendre basis on the unit square. Thus, there, the mass matrix is diagonal, if the grid cells are parallelograms. The basis here does not have this property; however, it is simpler to compute. On the other hand, this element has the additional disadvantage that the local cell matrices usually have a worse condition number than the ones originating from the FE_DGP element.

      This class is not implemented for the codimension one case (spacedim != dim).

      Transformation properties

      -

      It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

      -

      This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

      +

      It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

      +

      This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

      -

      For this cell, a bilinear transformation $F$ produces the relations $x=\hat
-x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
-y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

      -

      For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      +

      For this cell, a bilinear transformation $F$ produces the relations $x=\hat
+x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat
+y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

      +

      For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

      -

      $P_0$ element

      +

      $P_0$ element

      -
      @@ -527,11 +527,11 @@

      $P_0$ element, shape function 0

      +

      $P_0$ element, shape function 0

      -

      $P_1$ element

      +

      $P_1$ element

      - @@ -555,11 +555,11 @@

      -
      @@ -543,9 +543,9 @@

      $P_1$ element, shape function 0

      +

      $P_1$ element, shape function 0

      -

      $P_1$ element, shape function 1

      +

      $P_1$ element, shape function 1

      $P_1$ element, shape function 2

      +

      $P_1$ element, shape function 2

      -

      $P_2$ element

      +

      $P_2$ element

      - @@ -586,9 +586,9 @@ - @@ -601,11 +601,11 @@ - +
      @@ -571,9 +571,9 @@

      $P_2$ element, shape function 0

      +

      $P_2$ element, shape function 0

      -

      $P_2$ element, shape function 1

      +

      $P_2$ element, shape function 1

      $P_2$ element, shape function 2

      +

      $P_2$ element, shape function 2

      -

      $P_2$ element, shape function 3

      +

      $P_2$ element, shape function 3

      $P_2$ element, shape function 4

      +

      $P_2$ element, shape function 4

      -
      $P_2$ element, shape function 5
      $P_2$ element, shape function 5
      -

      $P_3$ element

      +

      $P_3$ element

      - @@ -632,9 +632,9 @@ - @@ -647,9 +647,9 @@ - @@ -662,9 +662,9 @@ - @@ -677,11 +677,11 @@ - +
      @@ -617,9 +617,9 @@

      $P_3$ element, shape function 0

      +

      $P_3$ element, shape function 0

      -

      $P_3$ element, shape function 1

      +

      $P_3$ element, shape function 1

      $P_3$ element, shape function 2

      +

      $P_3$ element, shape function 2

      -

      $P_3$ element, shape function 3

      +

      $P_3$ element, shape function 3

      $P_3$ element, shape function 4

      +

      $P_3$ element, shape function 4

      -

      $P_3$ element, shape function 5

      +

      $P_3$ element, shape function 5

      $P_3$ element, shape function 6

      +

      $P_3$ element, shape function 6

      -

      $P_3$ element, shape function 7

      +

      $P_3$ element, shape function 7

      $P_3$ element, shape function 8

      +

      $P_3$ element, shape function 8

      -
      $P_3$ element, shape function 9
      $P_3$ element, shape function 9
      -

      $P_4$ element

      +

      $P_4$ element

      - @@ -708,9 +708,9 @@ - @@ -723,9 +723,9 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-12-27 18:24:57.708797542 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-12-27 18:24:57.716797597 +0000 @@ -499,7 +499,7 @@

      Besides, this class is not implemented for the codimension one case (spacedim != dim).

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

      -

      $P_0$ element

      +

      $P_0$ element

      @@ -693,9 +693,9 @@

      $P_4$ element, shape function 0

      +

      $P_4$ element, shape function 0

      -

      $P_4$ element, shape function 1

      +

      $P_4$ element, shape function 1

      $P_4$ element, shape function 2

      +

      $P_4$ element, shape function 2

      -

      $P_4$ element, shape function 3

      +

      $P_4$ element, shape function 3

      -
      @@ -508,11 +508,11 @@

      $P_0$ element, shape function 0

      +

      $P_0$ element, shape function 0

      -

      $P_1$ element

      +

      $P_1$ element

      - @@ -536,11 +536,11 @@

      -
      @@ -524,9 +524,9 @@

      $P_1$ element, shape function 0

      +

      $P_1$ element, shape function 0

      -

      $P_1$ element, shape function 1

      +

      $P_1$ element, shape function 1

      $P_1$ element, shape function 2

      +

      $P_1$ element, shape function 2

      -

      $P_2$ element

      +

      $P_2$ element

      - @@ -567,9 +567,9 @@ - @@ -582,11 +582,11 @@ - +
      @@ -552,9 +552,9 @@

      $P_2$ element, shape function 0

      +

      $P_2$ element, shape function 0

      -

      $P_2$ element, shape function 1

      +

      $P_2$ element, shape function 1

      $P_2$ element, shape function 2

      +

      $P_2$ element, shape function 2

      -

      $P_2$ element, shape function 3

      +

      $P_2$ element, shape function 3

      $P_2$ element, shape function 4

      +

      $P_2$ element, shape function 4

      -
      $P_2$ element, shape function 5
      $P_2$ element, shape function 5
      -

      $P_3$ element

      +

      $P_3$ element

      - @@ -613,9 +613,9 @@ - @@ -628,9 +628,9 @@ - @@ -643,9 +643,9 @@ - @@ -658,11 +658,11 @@ - +
      @@ -598,9 +598,9 @@

      $P_3$ element, shape function 0

      +

      $P_3$ element, shape function 0

      -

      $P_3$ element, shape function 1

      +

      $P_3$ element, shape function 1

      $P_3$ element, shape function 2

      +

      $P_3$ element, shape function 2

      -

      $P_3$ element, shape function 3

      +

      $P_3$ element, shape function 3

      $P_3$ element, shape function 4

      +

      $P_3$ element, shape function 4

      -

      $P_3$ element, shape function 5

      +

      $P_3$ element, shape function 5

      $P_3$ element, shape function 6

      +

      $P_3$ element, shape function 6

      -

      $P_3$ element, shape function 7

      +

      $P_3$ element, shape function 7

      $P_3$ element, shape function 8

      +

      $P_3$ element, shape function 8

      -
      $P_3$ element, shape function 9
      $P_3$ element, shape function 9
      -

      $P_4$ element

      +

      $P_4$ element

      - @@ -689,9 +689,9 @@ - @@ -704,9 +704,9 @@ - @@ -719,9 +719,9 @@ - /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-12-27 18:24:57.852798531 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-12-27 18:24:57.856798558 +0000 @@ -530,7 +530,7 @@ *

      with node 13 being placed in the interior of the hex.

      Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

      Unit support point distribution and conditioning of interpolation

      -

      When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

      +

      When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

      The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

      Definition at line 111 of file fe_dgq.h.

      @@ -2294,17 +2294,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2337,21 +2337,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3481,7 +3481,7 @@
      @@ -674,9 +674,9 @@

      $P_4$ element, shape function 0

      +

      $P_4$ element, shape function 0

      -

      $P_4$ element, shape function 1

      +

      $P_4$ element, shape function 1

      $P_4$ element, shape function 2

      +

      $P_4$ element, shape function 2

      -

      $P_4$ element, shape function 3

      +

      $P_4$ element, shape function 3

      $P_4$ element, shape function 4

      +

      $P_4$ element, shape function 4

      -

      $P_4$ element, shape function 5

      +

      $P_4$ element, shape function 5

      $P_4$ element, shape function 6

      +

      $P_4$ element, shape function 6

      -

      $P_4$ element, shape function 7

      +

      $P_4$ element, shape function 7

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3583,7 +3583,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3844,8 +3844,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-12-27 18:24:57.992799492 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-12-27 18:24:58.000799547 +0000 @@ -2188,17 +2188,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2231,21 +2231,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3375,7 +3375,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3477,7 +3477,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3738,8 +3738,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-12-27 18:24:58.144800536 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-12-27 18:24:58.144800536 +0000 @@ -2190,17 +2190,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2233,21 +2233,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3377,7 +3377,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3479,7 +3479,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-12-27 18:24:58.296801580 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-12-27 18:24:58.288801525 +0000 @@ -2190,17 +2190,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2233,21 +2233,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3377,7 +3377,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3479,7 +3479,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-12-27 18:24:58.436802541 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-12-27 18:24:58.444802596 +0000 @@ -3156,7 +3156,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-12-27 18:24:58.588803585 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-12-27 18:24:58.600803668 +0000 @@ -3173,7 +3173,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3275,7 +3275,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3565,8 +3565,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3600,11 +3600,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-12-27 18:24:58.740804629 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-12-27 18:24:58.744804656 +0000 @@ -500,12 +500,12 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class FE_Enriched< dim, spacedim >

      Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk which enriches a standard finite element with an enrichment function multiplied with another (usually linear) finite element:

      -\[
+<picture><source srcset=\[
 U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k
-F_k(\mathbf x) U_{jk} \] +F_k(\mathbf x) U_{jk} \]" src="form_1146.png"/>

      -

      where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
-U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

      +

      where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc); $ U_i $ and $
+U_{jk} $ are the standard and enriched DoFs. This allows to include in the finite element space a priori knowledge about the partial differential equation being solved which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners or on unbounded domains or sudden changes of boundary conditions. PUM method uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties this makes the resulting space to reproduce enrichment functions exactly.

      The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example

      @@ -513,7 +513,7 @@
      Definition fe_q.h:554

      In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs are coming from a single finite element FE_Q<dim>(1) used with a single enrichment function function. In this case, the total number of DoFs on the enriched element is the sum of DoFs from FE_Q<dim>(2) and FE_Q<dim>(1).

      -

      As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      +

      As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

      @@ -526,7 +526,7 @@
      1d element, base and enriched shape functions. enriched shape function corresponding to the central vertex.

      Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   U(\mathbf x)
     &= \sum_i N_i(\mathbf x) U_i
     + \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
@@ -540,10 +540,10 @@
 F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
 + \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) + N_j(\mathbf
 x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
-\end{align*} +\end{align*}" src="form_1155.png"/>

      Using enriched and non-enriched FEs together

      -

      In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

      FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));
      +

      In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and use standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II that allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions can not to do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but to wrap the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

      FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

      This constructor is equivalent to calling

      FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
      FE_Nothing<dim>(1,true),
      nullptr);
      @@ -3233,7 +3233,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3335,7 +3335,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3625,8 +3625,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3660,11 +3660,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-12-27 18:24:58.888805645 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-12-27 18:24:58.892805673 +0000 @@ -3240,7 +3240,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3342,7 +3342,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3603,8 +3603,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3638,11 +3638,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.028806607 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.032806634 +0000 @@ -3449,7 +3449,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3551,7 +3551,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3808,8 +3808,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3843,11 +3843,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-12-27 18:24:59.180807651 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-12-27 18:24:59.176807623 +0000 @@ -3287,7 +3287,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3389,7 +3389,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3650,8 +3650,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.316808585 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.328808667 +0000 @@ -3000,7 +3000,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3102,7 +3102,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3359,8 +3359,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3394,11 +3394,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-12-27 18:24:59.480809711 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-12-27 18:24:59.476809683 +0000 @@ -493,8 +493,8 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FE_Hermite< dim, spacedim >

      This class implements a Hermite interpolation basis of maximum regularity elements (see [CiarletRiavart1972interpolation]). These bases are always of odd polynomial degree, have regularity $r=\frac{p-1}{2}$ and are defined up to polynomial degree $p=13$, with larger degrees currently being ill-conditioned.

      -

      Each node has $(r+1)^{d}$ degrees of freedom (DoFs) assigned to it, corresponding to various derivatives up to order $r$ in each direction. DoFs at each node are not consecutive in lexicographic ordering for $d>1$ due to the tensor product construction of the basis. The ordering is determined by the direction of the derivative each function corresponds to; first by $x$-derivatives, then $y$, then $z$. Locally over each element the DoFs are ordered similarly. See below for the local ordering for $r=1$, where DoFs are ordered from 0 to $(2r+2)^{d}-1$:

      +class FE_Hermite< dim, spacedim >

      This class implements a Hermite interpolation basis of maximum regularity elements (see [CiarletRiavart1972interpolation]). These bases are always of odd polynomial degree, have regularity $r=\frac{p-1}{2}$ and are defined up to polynomial degree $p=13$, with larger degrees currently being ill-conditioned.

      +

      Each node has $(r+1)^{d}$ degrees of freedom (DoFs) assigned to it, corresponding to various derivatives up to order $r$ in each direction. DoFs at each node are not consecutive in lexicographic ordering for $d>1$ due to the tensor product construction of the basis. The ordering is determined by the direction of the derivative each function corresponds to; first by $x$-derivatives, then $y$, then $z$. Locally over each element the DoFs are ordered similarly. See below for the local ordering for $r=1$, where DoFs are ordered from 0 to $(2r+2)^{d}-1$:

      FE_Hermite<1>(3)

      * (0)________________(2)
       * (1)                (3)
      @@ -2037,17 +2037,17 @@
       

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2080,21 +2080,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3457,7 +3457,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3559,7 +3559,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3849,8 +3849,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3884,11 +3884,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -5180,7 +5180,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      -

      Variable storing the order of the highest derivative that the current FE_Hermite object can enforce continuity for. Here the order of derivative only counts in one spatial direction, so the derivative $\frac{d^{2}f}{dx \; dy}$ would be counted as a first order derivative of $f$, as an example.

      +

      Variable storing the order of the highest derivative that the current FE_Hermite object can enforce continuity for. Here the order of derivative only counts in one spatial direction, so the derivative $\frac{d^{2}f}{dx \; dy}$ would be counted as a first order derivative of $f$, as an example.

      Definition at line 262 of file fe_hermite.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-12-27 18:24:59.632810754 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-12-27 18:24:59.640810809 +0000 @@ -532,12 +532,12 @@

      Detailed Description

      template<int dim>
      class FE_Nedelec< dim >
      Warning
      Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.
      -

      Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

      -

      The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

      +

      Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

      +

      The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

      Other properties of the Nédélec element are that (i) it is not a primitive element ; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

      We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_Nedelec(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

      -\[
+<picture><source srcset=\[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
@@ -545,7 +545,7 @@
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
-\] +\]" src="form_1195.png"/>

      Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

      This class is not implemented for the codimension one case (spacedim != dim).

      @@ -1405,11 +1405,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -4104,7 +4104,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4206,7 +4206,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4467,8 +4467,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-12-27 18:24:59.784811799 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-12-27 18:24:59.784811799 +0000 @@ -2985,7 +2985,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3087,7 +3087,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3377,8 +3377,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3412,11 +3412,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-12-27 18:24:59.828812101 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-12-27 18:24:59.832812128 +0000 @@ -160,9 +160,9 @@ class FE_NedelecSZ< dim, spacedim >::InternalData

      Derived Internal data which is used to store cell-independent data. Note that due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

      The main quantities which are stored are associated with edge and face parameterizations. These are:

      • -$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
      • +$\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
      • -$\sigma_{i}$ - linear functional associated with the $i$-th vertex.
      • +$\sigma_{i}$ - linear functional associated with the $i$-th vertex.

      The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.

      @@ -295,9 +295,9 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Storage for all possible edge parameterization between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

      -

      The edge parameterization of an edge, E, starting at vertex i and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

      -

      sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

      -

      Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

      +

      The edge parameterization of an edge, E, starting at vertex i and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

      +

      sigma_imj_values[q][i][j] stores the value of the edge parameterization connected by vertices $i$ and $j$ at the q-th quadrature point.

      +

      Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

      Definition at line 354 of file fe_nedelec_sz.h.

      @@ -317,8 +317,8 @@

      Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

      -

      The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

      -

      sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

      +

      The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma{j}$.

      +

      sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connected by vertices $i$ and $j$ in component $d$.

      Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

      Definition at line 371 of file fe_nedelec_sz.h.

      @@ -381,10 +381,10 @@

      Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

      -

      The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
-\lambda_{j}$.

      -

      Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

      -

      edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

      +

      The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} +
+\lambda_{j}$.

      +

      Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

      +

      edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

      Definition at line 414 of file fe_nedelec_sz.h.

      @@ -404,7 +404,7 @@

      Storage for gradients of edge extension parameters in 2d. In this case they are constant. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

      -

      edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

      +

      edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

      Definition at line 425 of file fe_nedelec_sz.h.

      @@ -424,7 +424,7 @@

      Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

      -

      edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

      +

      edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge m.

      Definition at line 436 of file fe_nedelec_sz.h.

      @@ -444,7 +444,7 @@

      Storage for 2nd derivatives of edge extension parameters in 3d, which are constant across the cell. These are stored for the 12 edges such that the global vertex numbering* would follow the order defined by the "standard" deal.II cell.

      -

      edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

      +

      edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

      Definition at line 448 of file fe_nedelec_sz.h.

      @@ -464,10 +464,10 @@

      Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

      -

      The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
-\lambda_{v4}$.

      -

      Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

      -

      face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

      +

      The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} +
+\lambda_{v4}$.

      +

      Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

      +

      face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

      Definition at line 466 of file fe_nedelec_sz.h.

      @@ -487,7 +487,7 @@

      Storage for gradients of face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

      -

      face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

      +

      face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

      Definition at line 476 of file fe_nedelec_sz.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-12-27 18:24:59.968813062 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-12-27 18:24:59.976813117 +0000 @@ -482,7 +482,7 @@ class FE_Nothing< dim, spacedim >

      Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

      This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

      FE_Nothing as seen as a function space

      -

      Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

      +

      Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

      FE_Nothing in combination with other elements

      In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

      The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.

      @@ -2888,7 +2888,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2990,7 +2990,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3251,8 +3251,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3286,11 +3286,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-12-27 18:25:00.112814051 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-12-27 18:25:00.120814106 +0000 @@ -489,13 +489,13 @@

      Detailed Description

      Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).

      Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires the continuity in an integral sense: any function in the space should have the same integral values on two sides of the common edge shared by two adjacent elements.

      -

      Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

      +

      Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jump across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

      Dice Rule

      Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

      Thus for the P1 nonconforming element, the function values at midpoints on edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is by using midpoint values of a function.

      However, these 4 functionals are not linearly independent because a linear function on 2d is uniquely determined by only 3 independent values. A simple observation reads that any linear function on a quadrilateral should satisfy the 'dice rule': the sum of two function values at the midpoints of the edge pair on opposite sides of a cell is equal to the sum of those at the midpoints of the other edge pair. This is called the 'dice rule' because the number of points on opposite sides of a dice always adds up to the same number as well (in the case of dice, to seven).

      -

      In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
-  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

      +

      In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) +
+  \phi(m_3)$ for all $\phi$ in the function space where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

      Conversely if 4 values at midpoints satisfying the dice rule are given, then there always exists the unique linear function which coincides with 4 midpoints values.

      Due to the dice rule, three values at any three midpoints can determine the last value at the last midpoint. It means that the number of independent local functionals on a cell is 3, and this is also the dimension of the linear polynomial space on a cell in 2d.

      Shape functions

      @@ -511,11 +511,11 @@ * | | * | | * 0---------|---------1 -*

      For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

      -

      The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

      +*

      For each vertex $v_j$ of given cell, there are two edges of which $v_j$ is one of end points. Consider a linear function such that it has value 0.5 at the midpoints of two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four function adds up to one because two functions have value 0.5 and the other value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

      +

      The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

      • -

        shape function $\phi_0$:

        *  +--------0.0--------+
        +

        shape function $\phi_0$:

        *  +--------0.0--------+
         *  |                   |
         *  |                   |
         *  |                   |
        @@ -529,7 +529,7 @@
         *  

      • -

        shape function $\phi_1$:

        *  +--------0.0--------+
        +

        shape function $\phi_1$:

        *  +--------0.0--------+
         *  |                   |
         *  |                   |
         *  |                   |
        @@ -543,7 +543,7 @@
         *  

      • -

        shape function $\phi_2$:

        *  +--------0.5--------+
        +

        shape function $\phi_2$:

        *  +--------0.5--------+
         *  |                   |
         *  |                   |
         *  |                   |
        @@ -557,7 +557,7 @@
         *  

      • -

        shape function $\phi_3$:

        *  +--------0.5--------+
        +

        shape function $\phi_3$:

        *  +--------0.5--------+
         *  |                   |
         *  |                   |
         *  |                   |
        @@ -887,8 +887,8 @@
           
         
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      -

      Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
-x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

      +

      Return the coefficients of 4 local linear shape functions $\phi_j(x,y) = a
+x + b y + c$ on given cell. For each local shape function, the array consists of three coefficients is in order of a,b and c.

      Definition at line 88 of file fe_p1nc.cc.

      @@ -2952,7 +2952,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3054,7 +3054,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3338,8 +3338,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3373,11 +3373,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-12-27 18:25:00.256815040 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-12-27 18:25:00.260815067 +0000 @@ -1420,17 +1420,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1465,21 +1465,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3013,7 +3013,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3115,7 +3115,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3405,8 +3405,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3440,11 +3440,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-12-27 18:25:00.404816056 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-12-27 18:25:00.412816111 +0000 @@ -2973,7 +2973,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3081,7 +3081,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3391,8 +3391,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3428,11 +3428,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-12-27 18:25:00.560817128 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-12-27 18:25:00.556817100 +0000 @@ -509,12 +509,12 @@

      Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs be set in the constructor of a derived class.

      Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, which basis the polynomial space chooses is not of importance to the current class – as described next, this class handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations internally.

      Determining the correct basis

      -

      In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

      -\begin{align*}
+<p>In most cases, the basis used by the class that describes the polynomial space, <picture><source srcset=$\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

      +\begin{align*}
   \varphi_j = \sum_k c_{jk} \tilde\varphi_j.
-\end{align*} +\end{align*}" src="form_1225.png"/>

      -

      These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

      // Now compute the inverse node matrix, generating the correct
      +

      These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below), assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

      // Now compute the inverse node matrix, generating the correct
      // basis functions from the raw ones. For a discussion of what
      // exactly happens here, see FETools::compute_node_matrix.
      @@ -527,7 +527,7 @@
      void invert(const FullMatrix< number2 > &M)
      FullMatrix< double > compute_node_matrix(const FiniteElement< dim, spacedim > &fe)
      -

      The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

      +

      The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

      In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.

      Setting the transformation

      In most cases, vector valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:

      @@ -2912,7 +2912,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3014,7 +3014,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3304,8 +3304,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3339,11 +3339,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-12-27 18:25:00.704818117 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-12-27 18:25:00.704818117 +0000 @@ -714,11 +714,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      @@ -1735,17 +1735,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1778,21 +1778,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3268,7 +3268,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3370,7 +3370,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3660,8 +3660,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-12-27 18:25:00.856819160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-12-27 18:25:00.852819133 +0000 @@ -851,11 +851,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1872,17 +1872,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1915,21 +1915,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3292,7 +3292,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3394,7 +3394,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3684,8 +3684,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-12-27 18:25:00.996820122 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-12-27 18:25:01.000820149 +0000 @@ -658,11 +658,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1685,17 +1685,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1728,21 +1728,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3276,7 +3276,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3378,7 +3378,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3668,8 +3668,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-12-27 18:25:01.152821193 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-12-27 18:25:01.156821220 +0000 @@ -508,7 +508,7 @@

      The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

      Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

      Unit support point distribution and conditioning of interpolation

      -

      When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

      +

      When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points xi are those points where the jth Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

      The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

      If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.

      Numbering of the degrees of freedom (DoFs)

      @@ -694,9 +694,9 @@ - @@ -709,9 +709,9 @@ - @@ -724,9 +724,9 @@ - @@ -739,9 +739,9 @@ - @@ -751,7 +751,7 @@

      -

      $Q_2$ element, shape function 0

      +

      $Q_2$ element, shape function 0

      -

      $Q_2$ element, shape function 1

      +

      $Q_2$ element, shape function 1

      $Q_2$ element, shape function 2

      +

      $Q_2$ element, shape function 2

      -

      $Q_2$ element, shape function 3

      +

      $Q_2$ element, shape function 3

      $Q_2$ element, shape function 4

      +

      $Q_2$ element, shape function 4

      -

      $Q_2$ element, shape function 5

      +

      $Q_2$ element, shape function 5

      $Q_2$ element, shape function 6

      +

      $Q_2$ element, shape function 6

      -

      $Q_2$ element, shape function 7

      +

      $Q_2$ element, shape function 7

      $Q_2$ element, shape function 8

      +

      $Q_2$ element, shape function 8

      @@ -920,9 +920,9 @@
      -

      $Q_4$ element, shape function 0

      +

      $Q_4$ element, shape function 0

      -

      $Q_4$ element, shape function 1

      +

      $Q_4$ element, shape function 1

      @@ -935,9 +935,9 @@ -

      $Q_4$ element, shape function 2

      +

      $Q_4$ element, shape function 2

      -

      $Q_4$ element, shape function 3

      +

      $Q_4$ element, shape function 3

      @@ -950,9 +950,9 @@ -

      $Q_4$ element, shape function 4

      +

      $Q_4$ element, shape function 4

      -

      $Q_4$ element, shape function 5

      +

      $Q_4$ element, shape function 5

      @@ -965,9 +965,9 @@ -

      $Q_4$ element, shape function 6

      +

      $Q_4$ element, shape function 6

      -

      $Q_4$ element, shape function 7

      +

      $Q_4$ element, shape function 7

      @@ -980,9 +980,9 @@ -

      $Q_4$ element, shape function 8

      +

      $Q_4$ element, shape function 8

      -

      $Q_4$ element, shape function 9

      +

      $Q_4$ element, shape function 9

      @@ -995,9 +995,9 @@ -

      $Q_4$ element, shape function 10

      +

      $Q_4$ element, shape function 10

      -

      $Q_4$ element, shape function 11

      +

      $Q_4$ element, shape function 11

      @@ -1010,9 +1010,9 @@ -

      $Q_4$ element, shape function 12

      +

      $Q_4$ element, shape function 12

      -

      $Q_4$ element, shape function 13

      +

      $Q_4$ element, shape function 13

      @@ -1025,9 +1025,9 @@ -

      $Q_4$ element, shape function 14

      +

      $Q_4$ element, shape function 14

      -

      $Q_4$ element, shape function 15

      +

      $Q_4$ element, shape function 15

      @@ -1040,9 +1040,9 @@ -

      $Q_4$ element, shape function 16

      +

      $Q_4$ element, shape function 16

      -

      $Q_4$ element, shape function 17

      +

      $Q_4$ element, shape function 17

      @@ -1055,9 +1055,9 @@ -

      $Q_4$ element, shape function 18

      +

      $Q_4$ element, shape function 18

      -

      $Q_4$ element, shape function 19

      +

      $Q_4$ element, shape function 19

      @@ -1070,9 +1070,9 @@ -

      $Q_4$ element, shape function 20

      +

      $Q_4$ element, shape function 20

      -

      $Q_4$ element, shape function 21

      +

      $Q_4$ element, shape function 21

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-12-27 18:25:01.296822181 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-12-27 18:25:01.300822209 +0000 @@ -2265,17 +2265,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2308,21 +2308,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3486,7 +3486,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3588,7 +3588,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3849,8 +3849,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3884,11 +3884,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-12-27 18:25:01.452823253 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-12-27 18:25:01.448823226 +0000 @@ -507,17 +507,17 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FE_Q_Bubbles< dim, spacedim >

      Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
+class FE_Q_Bubbles< dim, spacedim ></div><p>Implementation of a scalar Lagrange finite element <picture><source srcset=$Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape function $\varphi_j(\mathbf x)
 = 2^{p-1}\left(x_j-\frac 12\right)^{p-1}
-\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

      +\left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$" src="form_1233.png"/>. for $j=0,\ldots,dim-1$. If $p$ is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree is $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

      This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

      For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

      -

      Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

      +

      Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increaseses with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

      -

      Therefore, this element should be used with care for $p>3$.

      +

      Therefore, this element should be used with care for $p>3$.

      Implementation

      The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This TensorProductPolynomialsBubbles object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points from points and the bubble enrichments as defined above.

      Furthermore the constructor fills the interface_constrains, the prolongation (embedding) and the restriction matrices.

      @@ -736,11 +736,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -2459,17 +2459,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2502,21 +2502,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3593,7 +3593,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3695,7 +3695,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3956,8 +3956,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-12-27 18:25:01.596824242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-12-27 18:25:01.600824270 +0000 @@ -909,11 +909,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -2636,17 +2636,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2679,21 +2679,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3770,7 +3770,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3872,7 +3872,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4133,8 +4133,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-12-27 18:25:01.760825368 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-12-27 18:25:01.768825423 +0000 @@ -3219,17 +3219,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -3264,21 +3264,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -4658,7 +4658,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4760,7 +4760,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -5021,8 +5021,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -5056,11 +5056,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-12-27 18:25:01.904826357 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-12-27 18:25:01.908826384 +0000 @@ -504,14 +504,14 @@
      template<int dim, int spacedim = dim>
      class FE_Q_iso_Q1< dim, spacedim >

      Implementation of a scalar Lagrange finite element Qp-iso-Q1 that defines the finite element space of continuous, piecewise linear elements with p subdivisions in each coordinate direction. It yields an element with the same number of degrees of freedom as the Qp elements but using linear interpolation instead of higher order one. In other words, on every cell, the shape functions are not of higher order polynomial degree interpolating a set of node points, but are piecewise (bi-, tri-)linear within the cell and interpolating the same set of node points. This type of element is also called macro element in the literature as it can be seen as consisting of several smaller elements, namely pdim such sub-cells.

      The numbering of degrees of freedom is done in exactly the same way as in FE_Q of degree p. See there for a detailed description on how degrees of freedom are numbered within one element.

      -

      This element represents a Q-linear finite element space on a reduced mesh of size h/p. Its effect is equivalent to using FE_Q of degree one on a finer mesh by a factor p if an equivalent quadrature is used. However, this element reduces the flexibility in the choice of (adaptive) mesh size by exactly this factor p, which typically reduces efficiency. On the other hand, comparing this element with p subdivisions to the FE_Q element of degree p on the same mesh shows that the convergence is typically much worse for smooth problems. In particular, Qp elements achieve interpolation orders of hp+1 in the $L_2$ norm, whereas these elements reach only (h/p)2. For these two reasons, this element is usually not very useful as a standalone. In addition, any evaluation of face terms on the boundaries within the elements becomes impossible with this element because deal.II does not have the equivalent of FEFaceValues for lower-dimensional integrals in the interior of cells.

      +

      This element represents a Q-linear finite element space on a reduced mesh of size h/p. Its effect is equivalent to using FE_Q of degree one on a finer mesh by a factor p if an equivalent quadrature is used. However, this element reduces the flexibility in the choice of (adaptive) mesh size by exactly this factor p, which typically reduces efficiency. On the other hand, comparing this element with p subdivisions to the FE_Q element of degree p on the same mesh shows that the convergence is typically much worse for smooth problems. In particular, Qp elements achieve interpolation orders of hp+1 in the $L_2$ norm, whereas these elements reach only (h/p)2. For these two reasons, this element is usually not very useful as a standalone. In addition, any evaluation of face terms on the boundaries within the elements becomes impossible with this element because deal.II does not have the equivalent of FEFaceValues for lower-dimensional integrals in the interior of cells.

      Nonetheless, there are a few use cases where this element actually is useful:

      1. Systems of PDEs where certain variables demand for higher resolutions than the others and the additional degrees of freedom should be spent on increasing the resolution of linears instead of higher order polynomials, and you do not want to use two different meshes for the different components. This can be the case when irregularities (shocks) appear in the solution and stabilization techniques are used that work for linears but not higher order elements.

      2. -

        Stokes/Navier Stokes systems such as the one discussed in step-22 could be solved with Q2-iso-Q1 elements for velocities instead of $Q_2$ elements. Combined with $Q_1$ pressures they give a stable mixed element pair. However, they perform worse than the standard (Taylor-Hood $Q_2\times Q_1$) approach in most situations. (See, for example, [Boffi2011] .) This combination of subdivided elements for the velocity and non-subdivided elements for the pressure is sometimes called the "Bercovier-Pironneau +

        Stokes/Navier Stokes systems such as the one discussed in step-22 could be solved with Q2-iso-Q1 elements for velocities instead of $Q_2$ elements. Combined with $Q_1$ pressures they give a stable mixed element pair. However, they perform worse than the standard (Taylor-Hood $Q_2\times Q_1$) approach in most situations. (See, for example, [Boffi2011] .) This combination of subdivided elements for the velocity and non-subdivided elements for the pressure is sometimes called the "Bercovier-Pironneau element" and dates back to around the same time as the Taylor-Hood element (namely, the mid-1970s). For more information, see the paper by Bercovier and Pironneau from 1979 [Bercovier1979], and for the origins of the comparable Taylor-Hood element see [Taylor73] from 1973.

      3. @@ -2406,17 +2406,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2449,21 +2449,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3540,7 +3540,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3642,7 +3642,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3903,8 +3903,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-12-27 18:25:02.052827373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-12-27 18:25:02.060827428 +0000 @@ -510,7 +510,7 @@ class FE_RT_Bubbles< dim >

      This class implements a curl-enhanced Raviart-Thomas elements, conforming with Hdiv space. The node functionals are defined as point values in Gauss-Lobatto points. These elements generate vector fields with normal components continuous between mesh cells. The purpose of this finite element is in localizing the interactions between degrees of freedom around the nodes when an appropriate quadrature rule is used, leading to a block-diagonal mass matrix (even with full-tensor coefficient).

      The elements are defined through enrichment of classical Raviart-Thomas elements with extra curls, so that the Hdiv conformity is preserved, and the total number of degrees of freedom of FE_RT_Bubbles of order k is equal to the number of DoFs in dim copies of FE_Q of order k.

      Note
      Unlike Raviart-Thomas, the lowest possible order for this enhanced finite element is 1, i.e. $k \ge 1$.
      -

      The matching pressure space for FE_RT_Bubbles of order k is FE_DGQ of order k-1. With the exact integration, this pair yields $(k+1)$-st order of convergence in $L_2$-norm for a vector variable and $k$-th order in $L_2$-norm for a scalar one (same as $BDM_k \times P_{k-1}$).

      +

      The matching pressure space for FE_RT_Bubbles of order k is FE_DGQ of order k-1. With the exact integration, this pair yields $(k+1)$-st order of convergence in $L_2$-norm for a vector variable and $k$-th order in $L_2$-norm for a scalar one (same as $BDM_k \times P_{k-1}$).

      For this enhanced Raviart-Thomas element, the node values are not cell and face moments with respect to certain polynomials, but the values in Gauss-Lobatto quadrature points. The nodal values on edges (faces in 3d) are evaluated first, according to the natural ordering of the edges (faces) of a cell. The interior degrees of freedom are evaluated last.

      For an RT-Bubbles element of degree k, we choose (k+1)dim-1 Gauss-Lobatto points on each face. These points are ordered lexicographically with respect to the orientation of the face. In the interior of the cells, the values are computed using an anisotropic Gauss-Lobatto formula for integration. The mass matrix assembled with the use of this same quadrature rule, is block diagonal with blocks corresponding to quadrature points. See "Higher order multipoint flux mixed finite element methods on quadrilaterals and hexahedra" for more details.

      @@ -523,7 +523,7 @@

      - +
      Left - $2d,\,k=3$, right - $3d,\,k=2$.
      Left - $2d,\,k=3$, right - $3d,\,k=2$.
      Todo
      Implement restriction matrices
      @@ -749,11 +749,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3348,7 +3348,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3450,7 +3450,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-12-27 18:25:02.204828417 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-12-27 18:25:02.200828389 +0000 @@ -730,11 +730,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1859,17 +1859,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1904,21 +1904,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3394,7 +3394,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3496,7 +3496,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3786,8 +3786,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-12-27 18:25:02.360829489 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-12-27 18:25:02.356829461 +0000 @@ -519,11 +519,11 @@

      Detailed Description

      template<int dim>
      -class FE_RaviartThomas< dim >

      Implementation of Raviart-Thomas (RT) elements. The Raviart-Thomas space is designed to solve problems in which the solution only lives in the space $H^\text{div}=\{ {\mathbf u} \in L_2: \text{div}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose divergence is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the mixed formulation of the Laplace equation and related situations, see for example step-20. The defining characteristic of functions in $H^\text{div}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the normal component of the vector field must be continuous across the line (or surface) even though the tangential component may not be. As a consequence, the Raviart-Thomas element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the normal component of the vector field represented by each shape function is continuous across the faces of cells.

      +class FE_RaviartThomas< dim >

      Implementation of Raviart-Thomas (RT) elements. The Raviart-Thomas space is designed to solve problems in which the solution only lives in the space $H^\text{div}=\{ {\mathbf u} \in L_2: \text{div}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose divergence is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the mixed formulation of the Laplace equation and related situations, see for example step-20. The defining characteristic of functions in $H^\text{div}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the normal component of the vector field must be continuous across the line (or surface) even though the tangential component may not be. As a consequence, the Raviart-Thomas element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the normal component of the vector field represented by each shape function is continuous across the faces of cells.

      Other properties of the Raviart-Thomas element are that (i) it is not a primitive element ; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one. (There is, however, the FE_RaviartThomasNodal element that uses point values.)

      We follow the commonly used – though confusing – definition of the "degree" of RT elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_RaviartThomas(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

      -\[
+<picture><source srcset=\[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
@@ -531,7 +531,7 @@
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
-\] +\]" src="form_1195.png"/>

      This class is not implemented for the codimension one case (spacedim != dim).

      Interpolation

      @@ -798,11 +798,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3463,7 +3463,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3565,7 +3565,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3826,8 +3826,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-12-27 18:25:02.516830560 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-12-27 18:25:02.516830560 +0000 @@ -811,11 +811,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3482,7 +3482,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3584,7 +3584,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3874,8 +3874,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-12-27 18:25:02.652831494 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-12-27 18:25:02.656831521 +0000 @@ -490,7 +490,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FE_SimplexDGP< dim, spacedim >

      Implementation of a scalar discontinuous Lagrange finite element $P_k$, sometimes denoted as $P_{-k}$, that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

      +class FE_SimplexDGP< dim, spacedim >

      Implementation of a scalar discontinuous Lagrange finite element $P_k$, sometimes denoted as $P_{-k}$, that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

      Also see Simplex support.

      Definition at line 188 of file fe_simplex_p.h.

      @@ -1050,11 +1050,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -2071,17 +2071,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2114,21 +2114,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3344,7 +3344,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3446,7 +3446,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3707,8 +3707,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-12-27 18:25:02.792832455 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-12-27 18:25:02.800832510 +0000 @@ -490,7 +490,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FE_SimplexP< dim, spacedim >

      Implementation of a scalar Lagrange finite element $P_k$ that yields the finite element space of continuous, piecewise polynomials of degree $k$. The corresponding element on hypercube cells is FE_Q, on wegdes it is FE_WedgeP, and on pyramids it is FE_PyramidP.

      +class FE_SimplexP< dim, spacedim >

      Implementation of a scalar Lagrange finite element $P_k$ that yields the finite element space of continuous, piecewise polynomials of degree $k$. The corresponding element on hypercube cells is FE_Q, on wegdes it is FE_WedgeP, and on pyramids it is FE_PyramidP.

      Also see Simplex support.

      Definition at line 132 of file fe_simplex_p.h.

      @@ -1050,11 +1050,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -2071,17 +2071,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2114,21 +2114,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3344,7 +3344,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3446,7 +3446,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3707,8 +3707,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-12-27 18:25:02.936833444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-12-27 18:25:02.944833499 +0000 @@ -957,11 +957,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1978,17 +1978,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2021,21 +2021,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3336,7 +3336,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3438,7 +3438,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3699,8 +3699,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-12-27 18:25:03.084834460 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-12-27 18:25:03.084834460 +0000 @@ -916,11 +916,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1943,17 +1943,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1986,21 +1986,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3359,7 +3359,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3461,7 +3461,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3722,8 +3722,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-12-27 18:25:03.228835449 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-12-27 18:25:03.236835504 +0000 @@ -3274,7 +3274,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3376,7 +3376,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3637,8 +3637,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:03.376836466 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:03.376836466 +0000 @@ -3449,7 +3449,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3551,7 +3551,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3808,8 +3808,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3843,11 +3843,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-12-27 18:25:03.512837400 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-12-27 18:25:03.520837454 +0000 @@ -714,11 +714,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      @@ -1735,17 +1735,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1778,21 +1778,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3268,7 +3268,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3370,7 +3370,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3660,8 +3660,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-12-27 18:25:03.664838443 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-12-27 18:25:03.668838471 +0000 @@ -851,11 +851,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1872,17 +1872,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1915,21 +1915,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3292,7 +3292,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3394,7 +3394,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3684,8 +3684,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-12-27 18:25:03.800839377 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-12-27 18:25:03.808839432 +0000 @@ -658,11 +658,11 @@

      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1685,17 +1685,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

      -\[
+<picture><source srcset=\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
-\] +\]" src="form_1216.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

      +\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1218.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1728,21 +1728,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

      -\[
+<picture><source srcset=\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
-\] +\]" src="form_1220.png"/>

      -

      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      -\[
+<p> where <picture><source srcset=$J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

      +\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
-\] +\]" src="form_1221.png"/>

      -

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      +

      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3276,7 +3276,7 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3378,7 +3378,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3668,8 +3668,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-12-27 18:25:03.968840531 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-12-27 18:25:03.968840531 +0000 @@ -501,7 +501,7 @@ class FiniteElement< dim, spacedim >

      This is the base class for finite elements in arbitrary dimensions. It declares the interface both in terms of member variables and public member functions through which properties of a concrete implementation of a finite element can be accessed. This interface generally consists of a number of groups of variables and functions that can roughly be delineated as follows:

      • Basic information about the finite element, such as the number of degrees of freedom per vertex, edge, or cell. This kind of data is stored in the FiniteElementData base class. (Though the FiniteElement::get_name() member function also falls into this category.)
      • A description of the shape functions and their derivatives on the reference cell $[0,1]^d$, if an element is indeed defined by mapping shape functions from the reference cell to an actual cell.
      • -
      • Matrices (and functions that access them) that describe how an element's shape functions related to those on parent or child cells (restriction or prolongation) or neighboring cells (for hanging node constraints), as well as to other finite element spaces defined on the same cell (e.g., when doing $p$ refinement).
      • +
      • Matrices (and functions that access them) that describe how an element's shape functions related to those on parent or child cells (restriction or prolongation) or neighboring cells (for hanging node constraints), as well as to other finite element spaces defined on the same cell (e.g., when doing $p$ refinement).
      • Functions that describe the properties of individual shape functions, for example which vector components of a vector-valued finite element's shape function is nonzero, or whether an element is primitive.
      • For elements that are interpolatory, such as the common $Q_p$ Lagrange elements, data that describes where their support points are located.
      • Functions that define the interface to the FEValues class that is almost always used to access finite element shape functions from user code.
      • @@ -586,7 +586,7 @@
      21 1 0 8 1

      What we see is the following: there are a total of 22 degrees-of-freedom on this element with components ranging from 0 to 2. Each DoF corresponds to one of the two base elements used to build FESystem : $\mathbb Q_2$ or $\mathbb
-  Q_1$. Since FE_Q are primitive elements, we have a total of 9 distinct scalar-valued shape functions for the quadratic element and 4 for the linear element. Finally, for DoFs corresponding to the first base element multiplicity is either zero or one, meaning that we use the same scalar valued $\mathbb Q_2$ for both $x$ and $y$ components of the velocity field $\mathbb Q_2 \otimes \mathbb Q_2$. For DoFs corresponding to the second base element multiplicity is zero.

      + Q_1$" src="form_1037.png"/>. Since FE_Q are primitive elements, we have a total of 9 distinct scalar-valued shape functions for the quadratic element and 4 for the linear element. Finally, for DoFs corresponding to the first base element multiplicity is either zero or one, meaning that we use the same scalar valued $\mathbb Q_2$ for both $x$ and $y$ components of the velocity field $\mathbb Q_2 \otimes \mathbb Q_2$. For DoFs corresponding to the second base element multiplicity is zero.

      Support points

      Finite elements are frequently defined by defining a polynomial space and a set of dual functionals. If these functionals involve point evaluations, then the element is "interpolatory" and it is possible to interpolate an arbitrary (but sufficiently smooth) function onto the finite element space by evaluating it at these points. We call these points "support points".

      Most finite elements are defined by mapping from the reference cell to a concrete cell. Consequently, the support points are then defined on the reference ("unit") cell, see this glossary entry. The support points on a concrete cell can then be computed by mapping the unit support points, using the Mapping class interface and derived classes, typically via the FEValues class.

      @@ -618,8 +618,8 @@

      Through this construction, the degrees of freedom on the child faces are constrained to the degrees of freedom on the parent face. The information so provided is typically consumed by the DoFTools::make_hanging_node_constraints() function.

      Note
      The hanging node constraints described by these matrices are only relevant to the case where the same finite element space is used on neighboring (but differently refined) cells. The case that the finite element spaces on different sides of a face are different, i.e., the $hp$ case (see hp-finite element support) is handled by separate functions. See the FiniteElement::get_face_interpolation_matrix() and FiniteElement::get_subface_interpolation_matrix() functions.

      Interpolation matrices in three dimensions

      -

      For the interface constraints, the 3d case is similar to the 2d case. The numbering for the indices $n$ on the mother face is obvious and keeps to the usual numbering of degrees of freedom on quadrilaterals.

      -

      The numbering of the degrees of freedom on the interior of the refined faces for the index $m$ is as follows: let $d_v$ and $d_l$ be as above, and $d_q$ be the number of degrees of freedom per quadrilateral (and therefore per face), then $m=0...d_v-1$ denote the dofs on the vertex at the center, $m=d_v...5d_v-1$ for the dofs on the vertices at the center of the bounding lines of the quadrilateral, $m=5d_v..5d_v+4*d_l-1$ are for the degrees of freedom on the four lines connecting the center vertex to the outer boundary of the mother face, $m=5d_v+4*d_l...5d_v+4*d_l+8*d_l-1$ for the degrees of freedom on the small lines surrounding the quad, and $m=5d_v+12*d_l...5d_v+12*d_l+4*d_q-1$ for the dofs on the four child faces. Note the direction of the lines at the boundary of the quads, as shown below.

      +

      For the interface constraints, the 3d case is similar to the 2d case. The numbering for the indices $n$ on the mother face is obvious and keeps to the usual numbering of degrees of freedom on quadrilaterals.

      +

      The numbering of the degrees of freedom on the interior of the refined faces for the index $m$ is as follows: let $d_v$ and $d_l$ be as above, and $d_q$ be the number of degrees of freedom per quadrilateral (and therefore per face), then $m=0...d_v-1$ denote the dofs on the vertex at the center, $m=d_v...5d_v-1$ for the dofs on the vertices at the center of the bounding lines of the quadrilateral, $m=5d_v..5d_v+4*d_l-1$ are for the degrees of freedom on the four lines connecting the center vertex to the outer boundary of the mother face, $m=5d_v+4*d_l...5d_v+4*d_l+8*d_l-1$ for the degrees of freedom on the small lines surrounding the quad, and $m=5d_v+12*d_l...5d_v+12*d_l+4*d_q-1$ for the dofs on the four child faces. Note the direction of the lines at the boundary of the quads, as shown below.

      The order of the twelve lines and the four child faces can be extracted from the following sketch, where the overall order of the different dof groups is depicted:

      *    *--15--4--16--*
       *    |      |      |
       *    10 19  6  20  12
      @@ -660,7 +660,7 @@
       
    • Compute the basis vj of the finite element shape function space by applying M-1 to the basis wj.
    • -

      The matrix M may be computed with FETools::compute_node_matrix(). This function relies on the existence of generalized_support_points and FiniteElement::convert_generalized_support_point_values_to_dof_values() (see the glossary entry on generalized support points for more information). With this, one can then use the following piece of code in the constructor of a class derived from FiniteElement to compute the $M$ matrix:

      +

      The matrix M may be computed with FETools::compute_node_matrix(). This function relies on the existence of generalized_support_points and FiniteElement::convert_generalized_support_point_values_to_dof_values() (see the glossary entry on generalized support points for more information). With this, one can then use the following piece of code in the constructor of a class derived from FiniteElement to compute the $M$ matrix:

      this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
      this->n_dofs_per_cell()); this->inverse_node_matrix.invert(M);
      @@ -697,7 +697,7 @@ R_1 = \left(\begin{matrix}0 & 0 \\ 0 & 1\end{matrix}\right). \]" src="form_1057.png"/>

      -

      However, this approach already fails if we go to a $Q_2$ element with the following degrees of freedom:

      meshes: *-------* *----*----*
      +

      However, this approach already fails if we go to a $Q_2$ element with the following degrees of freedom:

      meshes: *-------* *----*----*
      local DoF numbers: 0 2 1 0 2 1|0 2 1
      global DoF numbers: 0 2 1 0 2 1 4 3

      Writing things as the sum over matrix operations as above would not easily work because we have to add nonzero values to $U^\text{coarse}_2$ twice, once for each child.

      @@ -2629,7 +2629,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2713,7 +2713,7 @@
      scalarAn object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2967,8 +2967,8 @@
      component_maskThe mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3004,11 +3004,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry ) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-12-27 18:25:04.024840915 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-12-27 18:25:04.028840943 +0000 @@ -332,8 +332,8 @@
      [in]support_point_valuesAn array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      - - + +
      [in]dofs_per_objectA vector that describes the number of degrees of freedom on geometrical objects for each dimension. This vector must have size dim+1, and entry 0 describes the number of degrees of freedom per vertex, entry 1 the number of degrees of freedom per line, etc. As an example, for the common $Q_1$ Lagrange element in 2d, this vector would have elements (1,0,0). On the other hand, for a $Q_3$ element in 3d, it would have entries (1,2,4,8).
      [in]n_componentsNumber of vector components of the element.
      [in]degreeThe maximal polynomial degree of any of the shape functions of this element in any variable on the reference element. For example, for the $Q_1$ element (in any space dimension), this would be one; this is so despite the fact that the element has a shape function of the form $\hat x\hat y$ (in 2d) and $\hat x\hat y\hat z$ (in 3d), which, although quadratic and cubic polynomials, are still only linear in each reference variable separately. The information provided by this variable is typically used in determining what an appropriate quadrature formula is.
      [in]conformityA variable describing which Sobolev space this element conforms to. For example, the $Q_p$ Lagrange elements (implemented by the FE_Q class) are $H^1$ conforming, whereas the Raviart-Thomas element (implemented by the FE_RaviartThomas class) is $H_\text{div}$ conforming; finally, completely discontinuous elements (implemented by the FE_DGQ class) are only $L_2$ conforming.
      [in]degreeThe maximal polynomial degree of any of the shape functions of this element in any variable on the reference element. For example, for the $Q_1$ element (in any space dimension), this would be one; this is so despite the fact that the element has a shape function of the form $\hat x\hat y$ (in 2d) and $\hat x\hat y\hat z$ (in 3d), which, although quadratic and cubic polynomials, are still only linear in each reference variable separately. The information provided by this variable is typically used in determining what an appropriate quadrature formula is.
      [in]conformityA variable describing which Sobolev space this element conforms to. For example, the $Q_p$ Lagrange elements (implemented by the FE_Q class) are $H^1$ conforming, whereas the Raviart-Thomas element (implemented by the FE_RaviartThomas class) is $H_\text{div}$ conforming; finally, completely discontinuous elements (implemented by the FE_DGQ class) are only $L_2$ conforming.
      [in]block_indicesAn argument that describes how the base elements of a finite element are grouped. The default value constructs a single block that consists of all dofs_per_cell degrees of freedom. This is appropriate for all "atomic" elements (including non-primitive ones) and these can therefore omit this argument. On the other hand, composed elements such as FESystem will want to pass a different value here.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-12-27 18:25:04.056841135 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-12-27 18:25:04.060841162 +0000 @@ -142,7 +142,7 @@
      template<typename T>
      class FiniteSizeHistory< T >

      A helper class to store a finite-size collection of objects of type T. If the number of elements exceeds the specified maximum size of the container, the oldest element is removed. Additionally, random access and removal of elements is implemented. Indexing is done relative to the last added element.

      In order to optimize the container for usage with memory-demanding objects (i.e. linear algebra vectors), the removal of an element does not free the memory. Instead the element is being kept in a separate cache so that subsequent addition does not require re-allocation of memory.

      -

      The primary usage of this class is in solvers to store a history of vectors. That is, if at the iteration $k$ we store $m$ vectors from previous iterations $\{k-1,k-2,...,k-m\}$, then addition of the new element will make the object contain elements from iterations $\{k,k-1,k-2,...,k-m+1\}$.

      +

      The primary usage of this class is in solvers to store a history of vectors. That is, if at the iteration $k$ we store $m$ vectors from previous iterations $\{k-1,k-2,...,k-m\}$, then addition of the new element will make the object contain elements from iterations $\{k,k-1,k-2,...,k-m+1\}$.

      Definition at line 48 of file history.h.

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-12-27 18:25:04.100841437 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-12-27 18:25:04.104841464 +0000 @@ -473,7 +473,7 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
      Parameters
      @@ -482,7 +482,7 @@
      -
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
      +
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, dim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-12-27 18:25:04.160841849 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-12-27 18:25:04.164841876 +0000 @@ -1096,8 +1096,8 @@
      -

      Return the l1-norm of the matrix, where $||M||_1 =
-\max_j \sum_i |M_{ij}|$ (maximum of the sums over columns).

      +

      Return the l1-norm of the matrix, where $||M||_1 =
+\max_j \sum_i |M_{ij}|$ (maximum of the sums over columns).

      @@ -1117,8 +1117,8 @@
      -

      Return the $l_\infty$-norm of the matrix, where $||M||_\infty = \max_i
-\sum_j |M_{ij}|$ (maximum of the sums over rows).

      +

      Return the $l_\infty$-norm of the matrix, where $||M||_\infty = \max_i
+\sum_j |M_{ij}|$ (maximum of the sums over rows).

      @@ -2071,7 +2071,7 @@

      A=Inverse(A). A must be a square matrix. Inversion of this matrix by Gauss-Jordan algorithm with partial pivoting. This process is well-behaved for positive definite matrices, but be aware of round-off errors in the indefinite case.

      In case deal.II was configured with LAPACK, the functions Xgetrf and Xgetri build an LU factorization and invert the matrix upon that factorization, providing best performance up to matrices with a few hundreds rows and columns.

      -

      The numerical effort to invert an $n \times n$ matrix is of the order $n^3$.

      +

      The numerical effort to invert an $n \times n$ matrix is of the order $n^3$.

      @@ -2115,7 +2115,7 @@
      -

      Assign the Cholesky decomposition $A=:L L^T$ of the given matrix $A$ to *this, where $L$ is lower triangular matrix. The given matrix must be symmetric positive definite.

      +

      Assign the Cholesky decomposition $A=:L L^T$ of the given matrix $A$ to *this, where $L$ is lower triangular matrix. The given matrix must be symmetric positive definite.

      ExcMatrixNotPositiveDefinite will be thrown in the case that the matrix is not positive definite.

      @@ -2139,7 +2139,7 @@ const Vector< number2 > & W&#href_anchor"memdoc"> -

      *this(i,j) = $V(i) W(j)$ where $V,W$ are vectors of the same length.

      +

      *this(i,j) = $V(i) W(j)$ where $V,W$ are vectors of the same length.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-12-27 18:25:04.216842234 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-12-27 18:25:04.220842261 +0000 @@ -256,14 +256,14 @@

      Functions that return tensors

      If the functions you are dealing with have a number of components that are a priori known (for example, dim elements), you might consider using the TensorFunction class instead. This is, in particular, true if the objects you return have the properties of a tensor, i.e., they are for example dim-dimensional vectors or dim-by-dim matrices. On the other hand, functions like VectorTools::interpolate or VectorTools::interpolate_boundary_values definitely only want objects of the current type. You can use the VectorFunctionFromTensorFunction class to convert the former to the latter.

      Functions that return vectors of other data types

      -

      Most of the time, your functions will have the form $f : \Omega \rightarrow
-{\mathbb R}^{n_\text{components}}$. However, there are occasions where you want the function to return vectors (or scalars) over a different number field, for example functions that return complex numbers or vectors of complex numbers: $f : \Omega \rightarrow {\mathbb
-C}^{n_\text{components}}$. In such cases, you can choose a value different than the default double for the second template argument of this class: it describes the scalar type to be used for each component of your return values. It defaults to double, but in the example above, it could be set to std::complex<double>. step-58 is an example of this.

      +

      Most of the time, your functions will have the form $f : \Omega \rightarrow
+{\mathbb R}^{n_\text{components}}$. However, there are occasions where you want the function to return vectors (or scalars) over a different number field, for example functions that return complex numbers or vectors of complex numbers: $f : \Omega \rightarrow {\mathbb
+C}^{n_\text{components}}$. In such cases, you can choose a value different than the default double for the second template argument of this class: it describes the scalar type to be used for each component of your return values. It defaults to double, but in the example above, it could be set to std::complex<double>. step-58 is an example of this.

      Template Parameters
      - - + +
      dimThe space dimension of the range space within which the domain $\Omega$ of the function lies. Consequently, the function will be evaluated at objects of type Point<dim>.
      RangeNumberTypeThe scalar type of the vector space that is the range (or image) of this function. As discussed above, objects of the current type represent functions from ${\mathbb
-  R}^\text{dim}$ to $S^{n_\text{components}}$ where $S$ is the underlying scalar type of the vector space. The type of $S$ is given by the RangeNumberType template argument.
      dimThe space dimension of the range space within which the domain $\Omega$ of the function lies. Consequently, the function will be evaluated at objects of type Point<dim>.
      RangeNumberTypeThe scalar type of the vector space that is the range (or image) of this function. As discussed above, objects of the current type represent functions from ${\mathbb
+  R}^\text{dim}$ to $S^{n_\text{components}}$ where $S$ is the underlying scalar type of the vector space. The type of $S$ is given by the RangeNumberType template argument.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-12-27 18:25:04.272842618 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-12-27 18:25:04.272842618 +0000 @@ -360,27 +360,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

      UpwindEuler 

      The upwind Euler formula of first order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

      FourthOrder 

      The fourth order scheme

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-12-27 18:25:04.324842976 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-12-27 18:25:04.328843003 +0000 @@ -553,7 +553,7 @@
      -

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the sub_manifold coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the sub_manifold coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. The default implementation calls the get_gradient() method of the FunctionManifold::push_forward_function() member class. If you construct this object using the constructor that takes two string expression, then the default implementation of this method uses a finite difference scheme to compute the gradients(see the AutoDerivativeFunction() class for details), and you can specify the size of the spatial step size at construction time with the h parameter.

      Refer to the general documentation of this class for more information.

      @@ -720,24 +720,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -746,11 +746,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 2024-12-27 18:25:04.388843415 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 2024-12-27 18:25:04.396843470 +0000 @@ -522,27 +522,27 @@

      Names of difference formulas.

      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      Enumerator
      Euler 

      The symmetric Euler formula of second order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

      UpwindEuler 

      The upwind Euler formula of first order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

      FourthOrder 

      The fourth order scheme

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-12-27 18:25:04.448843827 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-12-27 18:25:04.452843855 +0000 @@ -228,7 +228,7 @@

      Detailed Description

      template<int dim>
      -class Functions::CoordinateRestriction< dim >

      This class takes a function in dim + 1 dimensions and creates a new function in one dimension lower by restricting one of the coordinates to a given value. Mathematically this corresponds to taking a function $f = f(x, y, z)$, a fixed value, $Z$, and defining a new function (the restriction) $g = g(x, y) = f(x, y, Z)$. Using this class, this translates to

      +class Functions::CoordinateRestriction< dim >

      This class takes a function in dim + 1 dimensions and creates a new function in one dimension lower by restricting one of the coordinates to a given value. Mathematically this corresponds to taking a function $f = f(x, y, z)$, a fixed value, $Z$, and defining a new function (the restriction) $g = g(x, y) = f(x, y, Z)$. Using this class, this translates to

      double z = ...
      unsigned int restricted_direction = 2;
      @@ -236,7 +236,7 @@
      const SmartPointer< const Function< dim+1 > > function
      -

      The dim-dimensional coordinates on the restriction are ordered starting from the restricted (dim + 1)-coordinate. In particular, this means that if the $y$-coordinate is locked to $Y$ in 3d, the coordinates are ordered as $(z, x)$ on the restriction: $g = g(z, x) = f(x, Y, z)$. This is the same convention as in BoundingBox::cross_section.

      +

      The dim-dimensional coordinates on the restriction are ordered starting from the restricted (dim + 1)-coordinate. In particular, this means that if the $y$-coordinate is locked to $Y$ in 3d, the coordinates are ordered as $(z, x)$ on the restriction: $g = g(z, x) = f(x, Y, z)$. This is the same convention as in BoundingBox::cross_section.

      Definition at line 50 of file function_restriction.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-12-27 18:25:04.496844157 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-12-27 18:25:04.500844184 +0000 @@ -243,7 +243,7 @@ x=(x,y,z)$" src="form_492.png"/> will find the box so that $x_k\le x\le x_{k+1}, y_l\le y\le
 y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

      -
      Note
      If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ and the same is true for the other data points in higher dimensions, you should use the InterpolatedUniformGridData class instead.
      +
      Note
      If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ and the same is true for the other data points in higher dimensions, you should use the InterpolatedUniformGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of the related class InterpolatedUniformGridData is discussed in step-53.

      Dealing with large data sets

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-12-27 18:25:04.552844541 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-12-27 18:25:04.556844569 +0000 @@ -235,7 +235,7 @@ class Functions::InterpolatedUniformGridData< dim >

      A scalar function that computes its values by (bi-, tri-)linear interpolation from a set of point data that are arranged on a uniformly spaced tensor product mesh. In other words, considering the three-dimensional case, let there be points $x_0,\ldots, x_{K-1}$ that result from a uniform subdivision of the interval $[x_0,x_{K-1}]$ into $K-1$ sub-intervals of size $\Delta x = (x_{K-1}-x_0)/(K-1)$, and similarly $y_0,\ldots,y_{L-1}$, $z_1,\ldots,z_{M-1}$. Also consider data $d_{klm}$ defined at point $(x_k,y_l,z_m)^T$, then evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1},
 y_l\le y\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

      -
      Note
      If you have a problem where the points $x_i$ are not equally spaced (e.g., they result from a computation on a graded mesh that is denser closer to one boundary), then use the InterpolatedTensorProductGridData class instead.
      +
      Note
      If you have a problem where the points $x_i$ are not equally spaced (e.g., they result from a computation on a graded mesh that is denser closer to one boundary), then use the InterpolatedTensorProductGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of this class is discussed in step-53.

      Dealing with large data sets

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-12-27 18:25:04.600844871 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-12-27 18:25:04.604844898 +0000 @@ -229,13 +229,13 @@

      Detailed Description

      A function that solves the Laplace equation (with specific boundary values but zero right hand side) and that has a singularity at the center of the L-shaped domain in 2d (i.e., at the location of the re-entrant corner of this non-convex domain).

      The function is given in polar coordinates by $r^{\frac{2}{3}}
-\sin(\frac{2}{3} \phi)$ with a singularity at the origin and should be used with GridGenerator::hyper_L(). Here, $\phi$ is defined as the clockwise angle against the positive $x$-axis.

      +\sin(\frac{2}{3} \phi)$" src="form_466.png"/> with a singularity at the origin and should be used with GridGenerator::hyper_L(). Here, $\phi$ is defined as the clockwise angle against the positive $x$-axis.

      This function is often used to illustrate that the solutions of the Laplace equation

      \[
   -\Delta u = 0
 \]

      -

      can be singular even if the boundary values are smooth. (Here, if the domain is the L-shaped domain $(-1,1)^2 \backslash [0,1]^2$, the boundary values for $u$ are zero on the two line segments adjacent to the origin, and equal to $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ on the remaining parts of the boundary.) The function itself remains bounded on the domain, but its gradient is of the form $r^{-1/3}$ in the vicinity of the origin and consequently diverges as one approaches the origin.

      +

      can be singular even if the boundary values are smooth. (Here, if the domain is the L-shaped domain $(-1,1)^2 \backslash [0,1]^2$, the boundary values for $u$ are zero on the two line segments adjacent to the origin, and equal to $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ on the remaining parts of the boundary.) The function itself remains bounded on the domain, but its gradient is of the form $r^{-1/3}$ in the vicinity of the origin and consequently diverges as one approaches the origin.

      Definition at line 409 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-12-27 18:25:04.660845283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-12-27 18:25:04.668845338 +0000 @@ -392,27 +392,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t+h) -
 u(t-h)}{2h}.
-\] +\]" src="form_359.png"/>

      UpwindEuler 

      The upwind Euler formula of first order:

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t) -
 u(t-h)}{h}.
-\] +\]" src="form_360.png"/>

      FourthOrder 

      The fourth order scheme

      -\[
+<picture><source srcset=\[
 u'(t) \approx
 \frac{u(t-2h) - 8u(t-h)
 +  8u(t+h) - u(t+2h)}{12h}.
-\] +\]" src="form_361.png"/>

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-12-27 18:25:04.712845640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-12-27 18:25:04.716845667 +0000 @@ -231,7 +231,7 @@

      Detailed Description

      template<int dim>
      -class Functions::PointRestriction< dim >

      This class creates a 1-dimensional function from a dim + 1 dimensional function by restricting dim of the coordinate values to a given point. Mathematically this corresponds to taking a function, $f = f(x, y, z)$, and a point $(Y, Z)$, and defining a new function $g = g(x) = f(x, Y, Z)$. Using this class, this translates to

      +class Functions::PointRestriction< dim >

      This class creates a 1-dimensional function from a dim + 1 dimensional function by restricting dim of the coordinate values to a given point. Mathematically this corresponds to taking a function, $f = f(x, y, z)$, and a point $(Y, Z)$, and defining a new function $g = g(x) = f(x, Y, Z)$. Using this class, this translates to

      Point<2> point(y, z);
      unsigned int open_direction = 0;
      @@ -240,7 +240,7 @@
      const SmartPointer< const Function< dim+1 > > function
      -

      The coordinates of the point will be expanded in the higher-dimensional functions coordinates starting from the open-direction (and wrapping around). In particular, if we restrict to a point $(Z, X)$ and choose to keep the y-direction open, the restriction that is created is the function $g(y) = f(X, y, Z)$. This is consistent with the convention in BoundingBox::cross_section.

      +

      The coordinates of the point will be expanded in the higher-dimensional functions coordinates starting from the open-direction (and wrapping around). In particular, if we restrict to a point $(Z, X)$ and choose to keep the y-direction open, the restriction that is created is the function $g(y) = f(X, y, Z)$. This is consistent with the convention in BoundingBox::cross_section.

      Definition at line 109 of file function_restriction.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-12-27 18:25:04.764845997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-12-27 18:25:04.772846052 +0000 @@ -332,8 +332,8 @@ const std::vector< double > & coefficients&#href_anchor"memdoc"> -

      Constructor. The coefficients and the exponents of the polynomial are passed as arguments. The Table<2, double> exponents has a number of rows equal to the number of monomials of the polynomial and a number of columns equal to dim. The i-th row of the exponents table contains the ${\alpha_{i,d}}$ exponents of the i-th monomial $a_{i}\prod_{d=1}^{dim}
-x_{d}^{\alpha_{i,d}}$. The i-th element of the coefficients vector contains the coefficient $a_{i}$ for the i-th monomial.

      +

      Constructor. The coefficients and the exponents of the polynomial are passed as arguments. The Table<2, double> exponents has a number of rows equal to the number of monomials of the polynomial and a number of columns equal to dim. The i-th row of the exponents table contains the ${\alpha_{i,d}}$ exponents of the i-th monomial $a_{i}\prod_{d=1}^{dim}
+x_{d}^{\alpha_{i,d}}$. The i-th element of the coefficients vector contains the coefficient $a_{i}$ for the i-th monomial.

      Definition at line 2837 of file function_lib.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-12-27 18:25:04.812846327 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-12-27 18:25:04.816846354 +0000 @@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      class Functions::RayleighKotheVortex< dim >

      A class that represents a time-dependent function object for a Rayleigh–Kothe vortex vector field. This is generally used as flow pattern in complex test cases for interface tracking methods (e.g., volume-of-fluid and level-set approaches) since it leads to strong rotation and elongation of the fluid [Blais2013].

      -

      The stream function $\Psi$ of this Rayleigh-Kothe vortex is defined as:

      +

      The stream function $\Psi$ of this Rayleigh-Kothe vortex is defined as:

      \[
 \Psi = \frac{1}{\pi} \sin^2 (\pi x) \sin^2 (\pi y) \cos \left( \pi
 \frac{t}{T} \right)
/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-12-27 18:25:04.880846794 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-12-27 18:25:04.872846739 +0000
@@ -238,11 +238,11 @@
 <div class=

      template<int dim>
      class Functions::SignedDistance::Ellipsoid< dim >

      Signed-distance level set function to an ellipsoid defined by:

      -\[
+<picture><source srcset=\[
 \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} = 1
-\] +\]" src="form_533.png"/>

      -

      Here, $c_i$ are the coordinates of the center of the ellipsoid and $R_i$ are the elliptic radii. This function is zero on the ellipsoid, negative inside the ellipsoid and positive outside the ellipsoid.

      +

      Here, $c_i$ are the coordinates of the center of the ellipsoid and $R_i$ are the elliptic radii. This function is zero on the ellipsoid, negative inside the ellipsoid and positive outside the ellipsoid.

      Definition at line 144 of file function_signed_distance.h.

      Member Typedef Documentation

      @@ -462,9 +462,9 @@

      Evaluates the ellipsoid function:

      -\[
+<picture><source srcset=\[
 f(\vec{x}) = \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} - 1
-\] +\]" src="form_539.png"/>

      Definition at line 200 of file function_signed_distance.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-12-27 18:25:04.924847096 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-12-27 18:25:04.928847123 +0000 @@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      -class Functions::SignedDistance::Plane< dim >

      Signed level set function of a plane in $\mathbb{R}^{dim}$: $\psi(x) = n \cdot (x - x_p)$. Here, $n$ is the plane normal and $x_p$ is a point in the plane. Thus, with respect to the direction of the normal, this function is positive above the plane, zero in the plane, and negative below the plane. If the normal is normalized, $\psi$ will be the signed distance to the closest point in the plane.

      +class Functions::SignedDistance::Plane< dim >

      Signed level set function of a plane in $\mathbb{R}^{dim}$: $\psi(x) = n \cdot (x - x_p)$. Here, $n$ is the plane normal and $x_p$ is a point in the plane. Thus, with respect to the direction of the normal, this function is positive above the plane, zero in the plane, and negative below the plane. If the normal is normalized, $\psi$ will be the signed distance to the closest point in the plane.

      Definition at line 104 of file function_signed_distance.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-12-27 18:25:04.980847480 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-12-27 18:25:04.984847508 +0000 @@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::Rectangle< dim >

      Signed-distance level set function of a rectangle.

      -

      This function is zero on the rectangle, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      +

      This function is zero on the rectangle, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D rectangle are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-12-27 18:25:05.032847837 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-12-27 18:25:05.036847865 +0000 @@ -226,9 +226,9 @@

      Detailed Description

      template<int dim>
      -class Functions::SignedDistance::Sphere< dim >

      Signed-distance level set function of a sphere: $\psi(x) = \| x - x^c \| - R$. Here, $x^c$ is the center of the sphere and $R$ is its radius. This function is thus zero on the sphere, negative "inside" the ball having the sphere as its boundary, and positive in the rest of $\mathbb{R}^{dim}$.

      -

      This function has gradient and Hessian equal to $\partial_i \psi(x) = (x - x^c)/\| x - x^c \|$, $\partial_i \partial_j \psi =
-\delta_{ij}/\| x - x^c \| - (x_i - x_i^c)(x_j - x_j^c)/\| x - x^c \|^3$, where $\delta_{ij}$ is the Kronecker delta function.

      +class Functions::SignedDistance::Sphere< dim >

      Signed-distance level set function of a sphere: $\psi(x) = \| x - x^c \| - R$. Here, $x^c$ is the center of the sphere and $R$ is its radius. This function is thus zero on the sphere, negative "inside" the ball having the sphere as its boundary, and positive in the rest of $\mathbb{R}^{dim}$.

      +

      This function has gradient and Hessian equal to $\partial_i \psi(x) = (x - x^c)/\| x - x^c \|$, $\partial_i \partial_j \psi =
+\delta_{ij}/\| x - x^c \| - (x_i - x_i^c)(x_j - x_j^c)/\| x - x^c \|^3$, where $\delta_{ij}$ is the Kronecker delta function.

      Definition at line 48 of file function_signed_distance.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-12-27 18:25:05.088848222 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-12-27 18:25:05.092848249 +0000 @@ -226,8 +226,8 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::ZalesakDisk< dim >

      Signed-distance level set function of Zalesak's disk proposed in [zalesak1979fully].

      -

      It is calculated by the set difference $\psi(x) = \max(\psi_{S}(x),
--\psi_{N}(x))$ of the level set functions of a sphere $\psi_{S}$ and a rectangle $\psi_{N}$. This function is zero on the surface of the disk, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      +

      It is calculated by the set difference $\psi(x) = \max(\psi_{S}(x),
+-\psi_{N}(x))$ of the level set functions of a sphere $\psi_{S}$ and a rectangle $\psi_{N}$. This function is zero on the surface of the disk, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D Zalesak's disk are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-12-27 18:25:05.144848606 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-12-27 18:25:05.148848634 +0000 @@ -275,7 +275,7 @@

      Detailed Description

      A singular solution to Stokes' equations on a 2d L-shaped domain.

      -

      This function satisfies $-\triangle \mathbf{u} + \nabla p = 0$ and represents a typical singular solution around a reentrant corner of an L-shaped domain that can be created using GridGenerator::hyper_L(). The velocity vanishes on the two faces of the re-entrant corner and $\nabla\mathbf{u}$ and $p$ are singular at the origin while they are smooth in the rest of the domain because they can be written as a product of a smooth function and the term $r^{\lambda-1}$ where $r$ is the radius and $\lambda \approx 0.54448$ is a fixed parameter.

      +

      This function satisfies $-\triangle \mathbf{u} + \nabla p = 0$ and represents a typical singular solution around a reentrant corner of an L-shaped domain that can be created using GridGenerator::hyper_L(). The velocity vanishes on the two faces of the re-entrant corner and $\nabla\mathbf{u}$ and $p$ are singular at the origin while they are smooth in the rest of the domain because they can be written as a product of a smooth function and the term $r^{\lambda-1}$ where $r$ is the radius and $\lambda \approx 0.54448$ is a fixed parameter.

      Taken from Houston, Schötzau, Wihler, proceeding ENUMATH 2003.

      Definition at line 245 of file flow_function.h.

      @@ -1737,7 +1737,7 @@
      -

      The exponent of the radius, computed as the solution to $\sin(\lambda\omega)+\lambda \sin(\omega)=0$

      +

      The exponent of the radius, computed as the solution to $\sin(\lambda\omega)+\lambda \sin(\omega)=0$

      Definition at line 282 of file flow_function.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-12-27 18:25:05.196848963 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-12-27 18:25:05.204849018 +0000 @@ -920,7 +920,7 @@
      Returns
      This function returns a struct containing some extra data stored by the ExodusII file that cannot be loaded into a Triangulation - see ExodusIIData for more information.
      -

      A cell face in ExodusII can be in an arbitrary number of sidesets (i.e., it can have an arbitrary number of sideset ids) - however, a boundary cell face in deal.II has exactly one boundary id. All boundary faces that are not in a sideset are given the (default) boundary id of $0$. This function then groups sidesets together into unique sets and gives each one a boundary id. For example: Consider a single-quadrilateral mesh whose left side has no sideset id, right side has sideset ids $0$ and $1$, and whose bottom and top sides have sideset ids of $0$. The left face will have a boundary id of $0$, the top and bottom faces boundary ids of $1$, and the right face a boundary id of $2$. Hence the vector returned by this function in that case will be $\{\{\}, \{0\}, \{0, 1\}\}$.

      +

      A cell face in ExodusII can be in an arbitrary number of sidesets (i.e., it can have an arbitrary number of sideset ids) - however, a boundary cell face in deal.II has exactly one boundary id. All boundary faces that are not in a sideset are given the (default) boundary id of $0$. This function then groups sidesets together into unique sets and gives each one a boundary id. For example: Consider a single-quadrilateral mesh whose left side has no sideset id, right side has sideset ids $0$ and $1$, and whose bottom and top sides have sideset ids of $0$. The left face will have a boundary id of $0$, the top and bottom faces boundary ids of $1$, and the right face a boundary id of $2$. Hence the vector returned by this function in that case will be $\{\{\}, \{0\}, \{0, 1\}\}$.

      Definition at line 3772 of file grid_in.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-12-27 18:25:05.228849183 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-12-27 18:25:05.236849238 +0000 @@ -150,10 +150,10 @@

      Detailed Description

      template<typename number>
      class Householder< number >

      QR-decomposition of a full matrix.

      -

      This class computes the QR-decomposition of given matrix by the Householder algorithm. Then, the function least_squares() can be used to compute the vector $x$ minimizing $\|Ax-b\|$ for a given vector $b$. The QR decomposition of $A$ is useful for this purpose because the minimizer is given by the equation $x=(A^TA)^{-1}A^Tb=(R^TQ^TQR)^{-1}R^TQ^Tb$ which is easy to compute because $Q$ is an orthogonal matrix, and consequently $Q^TQ=I$. Thus, $x=(R^TR)^{-1}R^TQ^Tb=R^{-1}R^{-T}R^TQ^Tb=R^{-1}Q^Tb$. Furthermore, $R$ is triangular, so applying $R^{-1}$ to a vector only involves a backward or forward solve.

      +

      This class computes the QR-decomposition of given matrix by the Householder algorithm. Then, the function least_squares() can be used to compute the vector $x$ minimizing $\|Ax-b\|$ for a given vector $b$. The QR decomposition of $A$ is useful for this purpose because the minimizer is given by the equation $x=(A^TA)^{-1}A^Tb=(R^TQ^TQR)^{-1}R^TQ^Tb$ which is easy to compute because $Q$ is an orthogonal matrix, and consequently $Q^TQ=I$. Thus, $x=(R^TR)^{-1}R^TQ^Tb=R^{-1}R^{-T}R^TQ^Tb=R^{-1}Q^Tb$. Furthermore, $R$ is triangular, so applying $R^{-1}$ to a vector only involves a backward or forward solve.

      Implementation details

      -

      The class does not in fact store the $Q$ and $R$ factors explicitly as matrices. It does store $R$, but the $Q$ factor is stored as the product of Householder reflections of the form $Q_i = I-v_i v_i^T$ where the vectors $v_i$ are so that they can be stored in the lower-triangular part of an underlying matrix object, whereas $R$ is stored in the upper triangular part.

      -

      The $v_i$ vectors and the $R$ matrix now are in conflict because they both want to use the diagonal entry of the matrix, but we can only store one in these positions, of course. Consequently, the entries $(v_i)_i$ are stored separately in the diagonal member variable.

      +

      The class does not in fact store the $Q$ and $R$ factors explicitly as matrices. It does store $R$, but the $Q$ factor is stored as the product of Householder reflections of the form $Q_i = I-v_i v_i^T$ where the vectors $v_i$ are so that they can be stored in the lower-triangular part of an underlying matrix object, whereas $R$ is stored in the upper triangular part.

      +

      The $v_i$ vectors and the $R$ matrix now are in conflict because they both want to use the diagonal entry of the matrix, but we can only store one in these positions, of course. Consequently, the entries $(v_i)_i$ are stored separately in the diagonal member variable.

      Note
      Instantiations for this template are provided for <float> and <double>; others can be generated in application programs (see the section on Template instantiations in the manual).

      Definition at line 79 of file householder.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-12-27 18:25:05.260849403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-12-27 18:25:05.264849430 +0000 @@ -147,13 +147,13 @@  

      Detailed Description

      -

      Implementation of a simple class representing the identity matrix of a given size, i.e. a matrix with entries $A_{ij}=\delta_{ij}$. While it has the most important ingredients of a matrix, in particular that one can ask for its size and perform matrix-vector products with it, a matrix of this type is really only useful in two contexts: preconditioning and initializing other matrices.

      +

      Implementation of a simple class representing the identity matrix of a given size, i.e. a matrix with entries $A_{ij}=\delta_{ij}$. While it has the most important ingredients of a matrix, in particular that one can ask for its size and perform matrix-vector products with it, a matrix of this type is really only useful in two contexts: preconditioning and initializing other matrices.

      Initialization

      The main usefulness of this class lies in its ability to initialize other matrix, like this:

      std_cxx20::type_identity< T > identity
      -

      This creates a $10\times 10$ matrix with ones on the diagonal and zeros everywhere else. Most matrix types, in particular FullMatrix and SparseMatrix, have conversion constructors and assignment operators for IdentityMatrix, and can therefore be filled rather easily with identity matrices.

      +

      This creates a $10\times 10$ matrix with ones on the diagonal and zeros everywhere else. Most matrix types, in particular FullMatrix and SparseMatrix, have conversion constructors and assignment operators for IdentityMatrix, and can therefore be filled rather easily with identity matrices.

      Preconditioning

      No preconditioning at all is equivalent to preconditioning with preconditioning with the identity matrix. deal.II has a specialized class for this purpose, PreconditionIdentity, than can be used in a context as shown in the documentation of that class. The present class can be used in much the same way, although without any additional benefit:

      SolverControl solver_control (1000, 1e-12);
      SolverCG<> cg (solver_control);
      /usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-12-27 18:25:05.292849623 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-12-27 18:25:05.300849677 +0000 @@ -181,7 +181,7 @@

      Detailed Description

      template<typename VectorType>
      -class ImplicitQR< VectorType >

      A class to obtain the triangular $R$ matrix of the $A=QR$ factorization together with the matrix $A$ itself. The orthonormal matrix $Q$ is not stored explicitly, the name of the class. The multiplication with $Q$ can be represented as $Q=A R^{-1}$, whereas the multiplication with $Q^T$ is given by $Q^T=R^{-T}A^T$.

      +class ImplicitQR< VectorType >

      A class to obtain the triangular $R$ matrix of the $A=QR$ factorization together with the matrix $A$ itself. The orthonormal matrix $Q$ is not stored explicitly, the name of the class. The multiplication with $Q$ can be represented as $Q=A R^{-1}$, whereas the multiplication with $Q^T$ is given by $Q^T=R^{-T}A^T$.

      The class is designed to update a given (possibly empty) QR factorization due to the addition of a new column vector. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the column is removed.

      The VectorType template argument may either be a parallel and serial vector, and only need to have basic operations such as additions, scalar product, etc. It also needs to have a copy-constructor.

      @@ -346,7 +346,7 @@
      -

      Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

      +

      Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

      Implements BaseQR< VectorType >.

      @@ -380,7 +380,7 @@
      -

      Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      +

      Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      Implements BaseQR< VectorType >.

      @@ -414,7 +414,7 @@
      -

      Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

      +

      Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

      Implements BaseQR< VectorType >.

      @@ -448,7 +448,7 @@
      -

      Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      +

      Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      Implements BaseQR< VectorType >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-12-27 18:25:05.356850062 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-12-27 18:25:05.360850089 +0000 @@ -863,7 +863,7 @@
      -

      Return whether the IndexSets are ascending with respect to MPI process number and 1:1, i.e., each index is contained in exactly one IndexSet (among those stored on the different processes), each process stores contiguous subset of indices, and the index set on process $p+1$ starts at the index one larger than the last one stored on process $p$. In case there is only one MPI process, this just means that the IndexSet is complete.

      +

      Return whether the IndexSets are ascending with respect to MPI process number and 1:1, i.e., each index is contained in exactly one IndexSet (among those stored on the different processes), each process stores contiguous subset of indices, and the index set on process $p+1$ starts at the index one larger than the last one stored on process $p$. In case there is only one MPI process, this just means that the IndexSet is complete.

      Definition at line 1130 of file index_set.cc.

      @@ -1158,7 +1158,7 @@

      This command takes a "mask", i.e., a second index set of same size as the current one and returns the intersection of the current index set the mask, shifted to the index of an entry within the given mask. For example, if the current object is a an IndexSet object representing an index space [0,100) containing indices [20,40), and if the mask represents an index space of the same size but containing all 50 odd indices in this range, then the result will be an index set for a space of size 50 that contains those indices that correspond to the question "the how many'th entry in the mask are the indices [20,40). This will result in an index set of size 50 that contains the indices {11,12,13,14,15,16,17,18,19,20} (because, for example, the index 20 in the original set is not in the mask, but 21 is and corresponds to the 11th entry of the mask – the mask contains the elements {1,3,5,7,9,11,13,15,17,19,21,...}).

      In other words, the result of this operation is the intersection of the set represented by the current object and the mask, as seen within the mask. This corresponds to the notion of a view: The mask is a window through which we see the set represented by the current object.

      -

      A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). We solve this in parallel, so every MPI process has its own locally_owned_dofs index set that describes which among all $N_\text{dofs}$ degrees of freedom this process owns. Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. The question is which of the components of these 2-block vectors are locally owned? The answer is that we need to get a view of the locally_owned_dofs index set in which we apply a mask that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask (corresponding to an index set of size $N_\text{dofs}$) that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom. The resulting view is an index set of size $N_u+N_T$ that contains the indices of the locally owned $u$ and $T$ degrees of freedom.

      +

      A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). We solve this in parallel, so every MPI process has its own locally_owned_dofs index set that describes which among all $N_\text{dofs}$ degrees of freedom this process owns. Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. The question is which of the components of these 2-block vectors are locally owned? The answer is that we need to get a view of the locally_owned_dofs index set in which we apply a mask that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask (corresponding to an index set of size $N_\text{dofs}$) that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom. The resulting view is an index set of size $N_u+N_T$ that contains the indices of the locally owned $u$ and $T$ degrees of freedom.

      Definition at line 308 of file index_set.cc.

      @@ -1198,7 +1198,7 @@
      -

      Remove all elements contained in other from this set. In other words, if $x$ is the current object and $o$ the argument, then we compute $x
+<p>Remove all elements contained in <code>other</code> from this set. In other words, if <picture><source srcset=$x$ is the current object and $o$ the argument, then we compute $x
 \leftarrow x \backslash o$.

      Definition at line 473 of file index_set.cc.

      @@ -1943,7 +1943,7 @@
      -

      Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

      IndexSet is (N);
      +

      Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

      IndexSet is (N);
      is.add_range(0, N);

      This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

      if (my_index_set == complete_index_set(my_index_set.size())
      /usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-12-27 18:25:05.400850364 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-12-27 18:25:05.404850392 +0000 @@ -1228,7 +1228,7 @@
      -

      If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

      +

      If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

      Definition at line 317 of file polynomial.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-12-27 18:25:05.500851051 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-12-27 18:25:05.508851106 +0000 @@ -3861,7 +3861,7 @@
      -

      The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

      +

      The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

      Definition at line 988 of file lapack_full_matrix.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-12-27 18:25:05.548851381 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-12-27 18:25:05.556851436 +0000 @@ -934,7 +934,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      Definition at line 492 of file cuda_vector.cc.

      @@ -988,7 +988,7 @@
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 531 of file cuda_vector.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html 2024-12-27 18:25:05.608851793 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html 2024-12-27 18:25:05.612851820 +0000 @@ -1129,7 +1129,7 @@

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      The vectors need to have the same layout.

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 521 of file trilinos_epetra_vector.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-12-27 18:25:05.668852205 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-12-27 18:25:05.672852232 +0000 @@ -323,7 +323,7 @@

      Detailed Description

      template<typename Number>
      -class LinearAlgebra::ReadWriteVector< Number >

      ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually store all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

      +class LinearAlgebra::ReadWriteVector< Number >

      ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case in important in parallel computations, where $N$ may be so large that no processor can actually store all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

      This class provides access to individual elements to be read or written. However, it does not allow global operations such as taking the norm or dot products between vectors.

      Storing elements

      Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:05.740852699 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:05.748852754 +0000 @@ -1010,7 +1010,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      Definition at line 357 of file trilinos_tpetra_block_sparse_matrix.h.

      @@ -1042,7 +1042,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 372 of file trilinos_tpetra_block_sparse_matrix.h.

      @@ -2036,7 +2036,7 @@
      -

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      +

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -2610,7 +2610,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2718,7 +2718,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2024-12-27 18:25:05.812853194 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2024-12-27 18:25:05.820853249 +0000 @@ -1562,7 +1562,7 @@
      -

      $U = U * V$: scalar product.

      +

      $U = U * V$: scalar product.

      @@ -1588,7 +1588,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -1640,7 +1640,7 @@
      -

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      +

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -1666,7 +1666,7 @@
      -

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      +

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -1692,7 +1692,7 @@
      -

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      +

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      @@ -1727,7 +1727,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -1974,7 +1974,7 @@
      -

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      +

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 2024-12-27 18:25:05.888853716 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 2024-12-27 18:25:05.892853743 +0000 @@ -1845,7 +1845,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector has to be initialized with the same IndexSet the matrix was initialized with.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 2024-12-27 18:25:05.964854237 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 2024-12-27 18:25:05.964854237 +0000 @@ -477,7 +477,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      It is possible to specify the number of columns entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one does usually not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 102 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -509,7 +509,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      The vector n_entries_per_row specifies the number of entries in each row (an information usually not available, though).

      Definition at line 113 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -799,7 +799,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The number of columns entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 234 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -831,7 +831,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The vector n_entries_per_row specifies the number of entries in each row.

      Definition at line 248 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -1372,7 +1372,7 @@
      -

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      +

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      Definition at line 865 of file trilinos_tpetra_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 2024-12-27 18:25:06.028854677 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 2024-12-27 18:25:06.032854704 +0000 @@ -1570,7 +1570,7 @@

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      The vectors need to have the same layout.

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-12-27 18:25:06.112855254 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-12-27 18:25:06.116855281 +0000 @@ -1357,7 +1357,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      @@ -1576,7 +1576,7 @@ const bool symmetric = false&#href_anchor"memdoc"> -

      Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

      +

      Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      Note
      Internally, a single global reduction will be called to accumulate scalar product between locally owned degrees of freedom.
      @@ -1606,7 +1606,7 @@ const bool symmetric = false&#href_anchor"memdoc"> -

      Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

      +

      Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      Note
      Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
      @@ -1641,7 +1641,7 @@ const Number b = Number(1.)&#href_anchor"memdoc"> -

      Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

      +

      Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      @@ -1845,7 +1845,7 @@
      -

      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      +

      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      @@ -1865,7 +1865,7 @@
      -

      Return the square of the $l_2$ norm of the vector.

      +

      Return the square of the $l_2$ norm of the vector.

      @@ -1915,7 +1915,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const ::Vector< OtherNumber > &values)

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2563,7 +2563,7 @@
      -

      $U = U * V$: scalar product.

      +

      $U = U * V$: scalar product.

      @@ -2597,7 +2597,7 @@

      Performs a combined operation of a vector addition and a subsequent inner product, returning the value of the inner product. In other words, the result of this function is the same as if the user called

      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2788,7 +2788,7 @@
      -

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      +

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-12-27 18:25:06.204855885 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-12-27 18:25:06.204855885 +0000 @@ -1034,7 +1034,7 @@ const MPI_Comm comm_sm = MPI_COMM_SELF&#href_anchor"memdoc">

      Initialize vector with local_size locally-owned and ghost_size ghost degrees of freedoms.

      The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

      -
      Note
      In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
      +
      Note
      In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
      @@ -1757,7 +1757,7 @@
      -

      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      +

      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      @@ -1777,7 +1777,7 @@
      -

      Return the square of the $l_2$ norm of the vector.

      +

      Return the square of the $l_2$ norm of the vector.

      @@ -1827,7 +1827,7 @@
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2453,7 +2453,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-12-27 18:25:06.244856160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 2024-12-27 18:25:06.252856215 +0000 @@ -237,7 +237,7 @@
      std::function< void(Range &v, bool omit_zeroing_entries)> reinit_range_vector

      that store the knowledge how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

      The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

      -

      As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

      +

      As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possible different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

      #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
      /usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-12-27 18:25:06.288856462 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 2024-12-27 18:25:06.292856490 +0000 @@ -204,11 +204,11 @@

      In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex' coordinates through the following function call:

      ...
      Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
      ...
      -

      Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

      +

      Here, points is a collection of points in spacedim dimension, and a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

      Note
      Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

      Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.

      Common use case: Computing tangent vectors

      -

      The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

      +

      The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see the VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

      To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

      For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.

      @@ -216,11 +216,11 @@

      A unified description

      The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

      In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

      -

      Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

      -

      In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

      -

      Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

      -

      Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
-\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

      +

      Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length to denote progress along the geodesic.

      +

      In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodetic between the first two points, then on the geodetic between this new point and the third given point, etc.

      +

      Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a grand circle, and the velocity vector is tangent to the spherical surface.

      +

      Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0}
+\frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

      Definition at line 285 of file manifold.h.

      Member Typedef Documentation

      @@ -648,11 +648,11 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
-x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      -

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      -

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
-s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
+x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      +

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      +

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
+s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-12-27 18:25:06.356856929 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-12-27 18:25:06.360856957 +0000 @@ -245,7 +245,7 @@ class Mapping< dim, spacedim >

      Abstract base class for mapping classes.

      This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.

      Mathematics of the mapping

      -

      The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
+<p>The mapping is a transformation <picture><source srcset=$\mathbf x = \mathbf F_K(\hat{\mathbf  x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) =
 \hat\nabla {\mathbf F}_K(\hat{\mathbf  x})$. For instance, if dim=spacedim=2, we have

      \[
 J(\hat{\mathbf  x}) = \left(\begin{matrix}
@@ -1003,7 +1003,7 @@
 </table>
 </div><div class=

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

      -

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      +

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
      • @@ -1350,18 +1350,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

        @@ -1416,35 +1416,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -1501,21 +1501,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      @@ -1565,40 +1565,40 @@
      -

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

      +

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

      • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

      • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

        -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 2024-12-27 18:25:06.436857478 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 2024-12-27 18:25:06.440857506 +0000 @@ -874,18 +874,18 @@

      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

      • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

        Jacobians of spacedim-vector valued differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

      @@ -940,35 +940,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

      • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

      • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

        -\[
+u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

      @@ -1025,21 +1025,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

        ,

        where

        -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

      Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1089,40 +1089,40 @@
      -

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

      +

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

      • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

      • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

        -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

      @@ -1459,7 +1459,7 @@

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

      -

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      +

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
      • @@ -1521,7 +1521,7 @@

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

      -

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      +

      Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
      • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 2024-12-27 18:25:06.504857945 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 2024-12-27 18:25:06.508857973 +0000 @@ -231,9 +231,9 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        class MappingCartesian< dim, spacedim >

        A class providing a mapping from the reference cell to cells that are axiparallel, i.e., that have the shape of rectangles (in 2d) or boxes (in 3d) with edges parallel to the coordinate directions. The class therefore provides functionality that is equivalent to what, for example, MappingQ would provide for such cells. However, knowledge of the shape of cells allows this class to be substantially more efficient.

        -

        Specifically, the mapping is meant for cells for which the mapping from the reference to the real cell is a scaling along the coordinate directions: The transformation from reference coordinates $\hat {\mathbf
-x}$ to real coordinates $\mathbf x$ on each cell is of the form

        -\begin{align*}
+<p>Specifically, the mapping is meant for cells for which the mapping from the reference to the real cell is a scaling along the coordinate directions: The transformation from reference coordinates  <picture><source srcset=$\hat {\mathbf
+x}$ to real coordinates $\mathbf x$ on each cell is of the form

        +\begin{align*}
   {\mathbf x}(\hat {\mathbf x})
   =
   \begin{pmatrix}
@@ -242,10 +242,10 @@
   \end{pmatrix}
   \hat{\mathbf x}
   + {\mathbf v}_0
-\end{align*} +\end{align*}" src="form_1388.png"/>

        in 2d, and

        -\begin{align*}
+<picture><source srcset=\begin{align*}
   {\mathbf x}(\hat {\mathbf x})
   =
   \begin{pmatrix}
@@ -255,9 +255,9 @@
   \end{pmatrix}
   \hat{\mathbf x}
   + {\mathbf v}_0
-\end{align*} +\end{align*}" src="form_1389.png"/>

        -

        in 3d, where ${\mathbf v}_0$ is the bottom left vertex and $h_x,h_y,h_z$ are the extents of the cell along the axes.

        +

        in 3d, where ${\mathbf v}_0$ is the bottom left vertex and $h_x,h_y,h_z$ are the extents of the cell along the axes.

        The class is intended for efficiency, and it does not do a whole lot of error checking. If you apply this mapping to a cell that does not conform to the requirements above, you will get strange results.

        Definition at line 78 of file mapping_cartesian.h.

        @@ -656,18 +656,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

        @@ -724,35 +724,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -811,21 +811,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -877,40 +877,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1173,7 +1173,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 2024-12-27 18:25:06.572858412 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 2024-12-27 18:25:06.576858440 +0000 @@ -715,18 +715,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

        @@ -783,35 +783,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -870,21 +870,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -936,40 +936,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1190,7 +1190,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 2024-12-27 18:25:06.648858934 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 2024-12-27 18:25:06.652858962 +0000 @@ -785,18 +785,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

          @@ -853,35 +853,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -940,21 +940,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1006,40 +1006,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1304,7 +1304,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2024-12-27 18:25:06.688859209 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2024-12-27 18:25:06.692859236 +0000 @@ -735,7 +735,7 @@
        -

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        +

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 467 of file mapping_fe_field.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2024-12-27 18:25:06.732859511 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2024-12-27 18:25:06.736859538 +0000 @@ -757,7 +757,7 @@
        -

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        +

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 362 of file mapping_fe.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 2024-12-27 18:25:06.792859923 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 2024-12-27 18:25:06.792859923 +0000 @@ -633,18 +633,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

        @@ -701,35 +701,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -788,21 +788,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -854,40 +854,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1108,7 +1108,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 2024-12-27 18:25:06.832860198 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 2024-12-27 18:25:06.836860226 +0000 @@ -526,7 +526,7 @@
        -

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        +

        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij}=dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 298 of file mapping_manifold.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-12-27 18:25:06.912860747 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-12-27 18:25:06.920860802 +0000 @@ -266,7 +266,7 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        -class MappingQ< dim, spacedim >

        This class implements the functionality for polynomial mappings $Q_p$ of polynomial degree $p$ that will be used on all cells of the mesh. In order to get a genuine higher-order mapping for all cells, it is important to provide information about how interior edges and faces of the mesh should be curved. This is typically done by associating a Manifold with interior cells and edges. A simple example of this is discussed in the "Results" section of step-6; a full discussion of manifolds is provided in step-53. If manifolds are only attached to the boundaries of a domain, the current class with higher polynomial degrees will provide the same information as a mere MappingQ1 object. If you are working on meshes that describe a (curved) manifold embedded in higher space dimensions, i.e., if dim!=spacedim, then every cell is at the boundary of the domain you will likely already have attached a manifold object to all cells that can then also be used by the mapping classes for higher order mappings.

        +class MappingQ< dim, spacedim >

        This class implements the functionality for polynomial mappings $Q_p$ of polynomial degree $p$ that will be used on all cells of the mesh. In order to get a genuine higher-order mapping for all cells, it is important to provide information about how interior edges and faces of the mesh should be curved. This is typically done by associating a Manifold with interior cells and edges. A simple example of this is discussed in the "Results" section of step-6; a full discussion of manifolds is provided in step-53. If manifolds are only attached to the boundaries of a domain, the current class with higher polynomial degrees will provide the same information as a mere MappingQ1 object. If you are working on meshes that describe a (curved) manifold embedded in higher space dimensions, i.e., if dim!=spacedim, then every cell is at the boundary of the domain you will likely already have attached a manifold object to all cells that can then also be used by the mapping classes for higher order mappings.

        Behavior along curved boundaries and with different manifolds

        For a number of applications, one only knows a manifold description of a surface but not the interior of the computational domain. In such a case, a FlatManifold object will be assigned to the interior entities that describes a usual planar coordinate system where the additional points for the higher order mapping are placed exactly according to a bi-/trilinear mapping. When combined with a non-flat manifold on the boundary, for example a circle bulging into the interior of a square cell, the two manifold descriptions are in general incompatible. For example, a FlatManifold defined solely through the cell's vertices would put an interior point located at some small distance epsilon away from the boundary along a straight line and thus in general outside the concave part of a circle. If the polynomial degree of MappingQ is sufficiently high, the transformation from the reference cell to such a cell would in general contain inverted regions close to the boundary.

        In order to avoid this situation, this class applies an algorithm for making this transition smooth using a so-called transfinite interpolation that is essentially a linear blend between the descriptions along the surrounding entities. In the algorithm that computes additional points, the compute_mapping_support_points() method, all the entities of the cells are passed through hierarchically, starting from the lines to the quads and finally hexes. Points on objects higher up in the hierarchy are obtained from the manifold associated with that object, taking into account all the points previously computed by the manifolds associated with the lower-dimensional objects, not just the vertices. If only a line is assigned a curved boundary but the adjacent quad is on a flat manifold, the flat manifold on the quad will take the points on the deformed line into account when interpolating the position of the additional points inside the quad and thus always result in a well-defined transformation.

        @@ -800,18 +800,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

        @@ -868,35 +868,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -955,21 +955,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1021,40 +1021,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1365,7 +1365,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 2024-12-27 18:25:06.984861242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 2024-12-27 18:25:06.992861297 +0000 @@ -729,18 +729,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

          @@ -795,35 +795,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -880,21 +880,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -944,40 +944,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1314,7 +1314,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • @@ -1376,7 +1376,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-12-27 18:25:07.064861791 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-12-27 18:25:07.068861819 +0000 @@ -866,18 +866,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

          @@ -932,35 +932,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -1017,21 +1017,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1081,40 +1081,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1451,7 +1451,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 2024-12-27 18:25:07.160862450 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 2024-12-27 18:25:07.156862423 +0000 @@ -516,7 +516,7 @@ const std::function< std::vector< Point< spacedim > >(const typename Triangulation< dim, spacedim >::cell_iterator &)> & compute_points_on_cell&#href_anchor"memdoc"> -

          Initialize the data cache by letting the function given as an argument provide the mapping support points for all cells (on all levels) of the given triangulation. The function must return a vector of Point<spacedim> whose length is the same as the size of the polynomial space, $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping, and it must be in the order the mapping or FE_Q sort their points, i.e., all $2^\text{dim}$ vertex points first, then the points on the lines, quads, and hexes according to the usual hierarchical numbering. No attempt is made to validate these points internally, except for the number of given points.

          +

          Initialize the data cache by letting the function given as an argument provide the mapping support points for all cells (on all levels) of the given triangulation. The function must return a vector of Point<spacedim> whose length is the same as the size of the polynomial space, $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping, and it must be in the order the mapping or FE_Q sort their points, i.e., all $2^\text{dim}$ vertex points first, then the points on the lines, quads, and hexes according to the usual hierarchical numbering. No attempt is made to validate these points internally, except for the number of given points.

          Note
          If multiple threads are enabled, this function will run in parallel, invoking the function passed in several times. Thus, in case MultithreadInfo::n_threads()>1, the user code must make sure that the function, typically a lambda, does not write into data shared with other threads.
          The cache is invalidated upon the signal Triangulation::Signals::any_change of the underlying triangulation.
          @@ -1087,18 +1087,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

          @@ -1153,35 +1153,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -1238,21 +1238,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1302,40 +1302,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1672,7 +1672,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • @@ -1734,7 +1734,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 2024-12-27 18:25:07.228862917 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 2024-12-27 18:25:07.236862972 +0000 @@ -897,18 +897,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1368.png"/>

          @@ -963,35 +963,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

        • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

          -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

          +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

        • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

          -\[
+u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>

        @@ -1048,21 +1048,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

          ,

          where

          -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

        Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1112,40 +1112,40 @@
        -

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

        +

        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1382.png"/>

        • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_iI(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1384.png"/>

        • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

          -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

          +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1386.png"/>

        @@ -1482,7 +1482,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        -

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        +

        Conceptually, this function's represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\mathbf\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 2024-12-27 18:25:07.276863247 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 2024-12-27 18:25:07.276863247 +0000 @@ -387,7 +387,7 @@

        Number of shape functions. If this is a Q1 mapping, then it is simply the number of vertices per cell. However, since also derived classes use this class (e.g. the Mapping_Q() class), the number of shape functions may also be different.

        -

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.

        +

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.

        Definition at line 372 of file mapping_q.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-12-27 18:25:07.416864208 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-12-27 18:25:07.424864263 +0000 @@ -401,7 +401,7 @@
        -

        Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        +

        Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        Definition at line 2000 of file quadrature_generator.cc.

        @@ -431,7 +431,7 @@
        -

        Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        +

        Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        Definition at line 2008 of file quadrature_generator.cc.

        @@ -461,8 +461,8 @@
        -

        Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        -
        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.
        +

        Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        +
        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.

        Definition at line 2017 of file quadrature_generator.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-12-27 18:25:07.448864428 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-12-27 18:25:07.452864455 +0000 @@ -388,7 +388,7 @@
        -

        Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        +

        Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        Definition at line 1871 of file quadrature_generator.cc.

        @@ -418,7 +418,7 @@
        -

        Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        +

        Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        Definition at line 1880 of file quadrature_generator.cc.

        @@ -448,8 +448,8 @@
        -

        Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        -
        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.
        +

        Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

        +
        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.

        Definition at line 1889 of file quadrature_generator.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-12-27 18:25:07.544865087 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-12-27 18:25:07.548865115 +0000 @@ -520,7 +520,7 @@ const unsigned int quadrature_point&#href_anchor"memdoc">

        Returns the surface gradient of the shape function with index function_no at the quadrature point with index quadrature_point.

        -

        The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

        +

        The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients | update_normal_vectors flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 137 of file fe_immersed_values.cc.

        @@ -695,7 +695,7 @@

        If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

        Parameters
        - +
        iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
        iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
        q_pointNumber of the quadrature point at which function is to be evaluated
        @@ -734,7 +734,7 @@

        Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

        Parameters
        - +
        iNumber of the shape function $\varphi_i$ to be evaluated.
        iNumber of the shape function $\varphi_i$ to be evaluated.
        q_pointNumber of the quadrature point at which function is to be evaluated.
        componentvector component to be evaluated.
        @@ -771,7 +771,7 @@

        The same holds for the arguments of this function as for the shape_value() function.

        Parameters
        - +
        iNumber of the shape function $\varphi_i$ to be evaluated.
        iNumber of the shape function $\varphi_i$ to be evaluated.
        q_pointNumber of the quadrature point at which function is to be evaluated.
        @@ -965,11 +965,11 @@
        Parameters
        - +
        [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
        [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
        -
        Postcondition
        values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
        +
        Postcondition
        values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 487 of file fe_values_base.cc.

        @@ -999,7 +999,7 @@

        This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        -
        Postcondition
        values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
        +
        Postcondition
        values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 505 of file fe_values_base.cc.

        @@ -1160,11 +1160,11 @@
        Parameters
        - +
        [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        -
        Postcondition
        gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
        +
        Postcondition
        gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1196,7 +1196,7 @@

        This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        -
        Postcondition
        gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        +
        Postcondition
        gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 683 of file fe_values_base.cc.

        @@ -1303,11 +1303,11 @@
        Parameters
        - +
        [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        -
        Postcondition
        hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
        +
        Postcondition
        hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1344,7 +1344,7 @@

        This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        -
        Postcondition
        hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        +
        Postcondition
        hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 786 of file fe_values_base.cc.

        @@ -1451,11 +1451,11 @@
        Parameters
        - +
        [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
        [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assume to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
        -
        Postcondition
        laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
        +
        Postcondition
        laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
        For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        @@ -1489,7 +1489,7 @@

        This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        -
        Postcondition
        laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
        +
        Postcondition
        laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
        For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1636,11 +1636,11 @@
        Parameters
        - +
        [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
        -
        Postcondition
        third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
        +
        Postcondition
        third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1677,7 +1677,7 @@

        This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        -
        Postcondition
        third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        +
        Postcondition
        third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 1006 of file fe_values_base.cc.

        @@ -1992,7 +1992,7 @@

        Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

        For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

        -

        You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

        +

        You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -2045,7 +2045,7 @@
      -

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      +

      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2099,7 +2099,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2153,7 +2153,7 @@
      -

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      +

      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2207,7 +2207,7 @@
      -

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      +

      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2261,8 +2261,8 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-12-27 18:25:07.596865444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-12-27 18:25:07.596865444 +0000 @@ -173,11 +173,11 @@

      Detailed Description

      template<int dim>
      -class NonMatching::FEInterfaceValues< dim >

      This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

      -\[
+class NonMatching::FEInterfaceValues< dim ></div><p>This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to <a class=NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

      +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
-\] +\]" src="form_2117.png"/>

      which we as before refer to as the "inside" and "outside" regions of the face.

      @@ -210,7 +210,7 @@
      }
      }
      -

      To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

      +

      To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

      Definition at line 488 of file fe_values.h.

      Member Typedef Documentation

      @@ -364,7 +364,7 @@ - + @@ -502,7 +502,7 @@
      mapping_collectionCollection of Mappings to be used.
      fe_collectionCollection of FiniteElements to be used.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
      q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
      mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
      region_update_flagsStruct storing UpdateFlags for the inside/outside region of the cell.
      -

      Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      +

      Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

      Definition at line 537 of file fe_values.cc.

      @@ -525,7 +525,7 @@
      -

      Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      +

      Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

      Definition at line 549 of file fe_values.cc.

      @@ -556,7 +556,7 @@
      -

      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

      +

      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

      Definition at line 397 of file fe_values.cc.

      @@ -847,7 +847,7 @@
      -

      FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      +

      FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 702 of file fe_values.h.

      @@ -874,7 +874,7 @@
      -

      FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      +

      FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 711 of file fe_values.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-12-27 18:25:07.632865691 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-12-27 18:25:07.640865746 +0000 @@ -177,17 +177,17 @@

      Detailed Description

      template<int dim>
      -class NonMatching::FEValues< dim >

      This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

      -\[
+class NonMatching::FEValues< dim ></div><p>This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, <picture><source srcset=$\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

      +\[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
-\] +\]" src="form_2114.png"/>

      Thus we need quadrature rules for these 3 regions:

      -

      As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

      +

      As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

      for (const auto &cell : dof_handler.active_cell_iterators())
      {
      @@ -208,7 +208,7 @@
      }
      std::optional<::FEValues< dim > > fe_values_inside
      Definition fe_values.h:397
      -

      Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

      +

      Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

      Definition at line 143 of file fe_values.h.

      Member Typedef Documentation

      @@ -359,7 +359,7 @@ - + @@ -464,7 +464,7 @@
      mapping_collectionCollection of Mappings to be used.
      fe_collectionCollection of FiniteElements to be used.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
      q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
      mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
      region_update_flagsStruct storing UpdateFlags for the inside/outside/surface region of the cell.
      -

      Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      +

      Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

      Definition at line 306 of file fe_values.cc.

      @@ -487,7 +487,7 @@
      -

      Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      +

      Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

      Definition at line 318 of file fe_values.cc.

      @@ -510,7 +510,7 @@
      -

      Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

      +

      Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

      Definition at line 330 of file fe_values.cc.

      @@ -583,7 +583,7 @@
      -

      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

      +

      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

      Definition at line 101 of file fe_values.cc.

      @@ -800,7 +800,7 @@
      -

      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

      +

      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

      When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

      This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

      @@ -829,7 +829,7 @@
      -

      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

      +

      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

      When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

      This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

      @@ -858,7 +858,7 @@
      -

      FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      +

      FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 397 of file fe_values.h.

      @@ -885,7 +885,7 @@
      -

      FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      +

      FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 406 of file fe_values.h.

      @@ -912,7 +912,7 @@
      -

      FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      +

      FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 415 of file fe_values.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-12-27 18:25:07.664865911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-12-27 18:25:07.672865966 +0000 @@ -149,16 +149,16 @@

      Detailed Description

      template<int dim>
      -class NonMatching::FaceQuadratureGenerator< dim >

      This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

      -

      In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

      -\[
+class NonMatching::FaceQuadratureGenerator< dim ></div><p>This class creates immersed quadrature rules over a face, <picture><source srcset=$F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

      +

      In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

      +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \}, \\
 S = \{x \in F : \psi(x) = 0 \},
-\] +\]" src="form_2157.png"/>

      -

      which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

      -

      Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

      +

      which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These type of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

      +

      Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

      Definition at line 305 of file quadrature_generator.h.

      Member Typedef Documentation

      @@ -305,7 +305,7 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2000 of file quadrature_generator.cc.

      @@ -327,7 +327,7 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2008 of file quadrature_generator.cc.

      @@ -349,8 +349,8 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      -
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.
      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 2017 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-12-27 18:25:07.696866131 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-12-27 18:25:07.700866158 +0000 @@ -282,7 +282,7 @@

      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2098 of file quadrature_generator.cc.

      @@ -304,7 +304,7 @@

      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2105 of file quadrature_generator.cc.

      @@ -324,7 +324,7 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ where, $F$ is the face of the BoundingBox passed to generate().

      Note
      In 1d, this quadrature always contains 0 points.

      Definition at line 2113 of file quadrature_generator.cc.

      @@ -371,7 +371,7 @@
      -

      Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

      +

      Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

      Definition at line 498 of file quadrature_generator.h.

      @@ -396,7 +396,7 @@
      -

      Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

      +

      Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

      Definition at line 505 of file quadrature_generator.h.

      @@ -421,7 +421,7 @@
      -

      Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

      +

      Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

      Definition at line 512 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-12-27 18:25:07.720866296 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-12-27 18:25:07.728866351 +0000 @@ -139,41 +139,41 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

      This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

      -

      The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

      -

      Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

      -\[
+class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim ></div><p>This class defines a quadrature formula to integrate over the intersection between an oriented surface, <picture><source srcset=$\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

      +

      The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

      +

      Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

      +\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right
 )^T d\hat{S}|,
-\] +\]" src="form_2131.png"/>

      -

      where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

      -\[
+<p> where <picture><source srcset=$F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

      +\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
-\] +\]" src="form_2134.png"/>

      for each quadrature point. The surface integral in real space would then be approximated as

      -\[
+<picture><source srcset=\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
-\] +\]" src="form_2135.png"/>

      -

      When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

      -\[
+<p>When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let <picture><source srcset=$\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterizations of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

      +\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
-\] +\]" src="form_2141.png"/>

      -

      where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

      +

      where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

      Definition at line 106 of file immersed_surface_quadrature.h.

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-12-27 18:25:07.752866515 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-12-27 18:25:07.756866543 +0000 @@ -147,24 +147,24 @@

      Detailed Description

      template<int dim>
      -class NonMatching::QuadratureGenerator< dim >

      This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

      +class NonMatching::QuadratureGenerator< dim >

      This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

      This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions

      -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \}.
-\] +\]" src="form_2151.png"/>

      -

      When working with level set functions, the most common is to describe a domain, $\Omega$, as

      -\[
+<p>When working with level set functions, the most common is to describe a domain, <picture><source srcset=$\Omega$, as

      +\[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
-\] +\]" src="form_2152.png"/>

      -

      Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

      -

      The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

      +

      Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

      +

      The underlying algorithm use a 1-dimensional quadrature rule as base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$ the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

      A detailed description of the underlying algorithm can be found in "High-Order %Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles, R. I. Saye, SIAM J. Sci. Comput., 37(2), <a href="http://www.dx.doi.org/10.1137/140966290"> @@ -305,7 +305,7 @@

      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      Definition at line 1871 of file quadrature_generator.cc.

      @@ -327,7 +327,7 @@
      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      Definition at line 1880 of file quadrature_generator.cc.

      @@ -349,8 +349,8 @@
      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      -
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.
      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 1889 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-12-27 18:25:07.804866872 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-12-27 18:25:07.808866900 +0000 @@ -258,7 +258,7 @@

      where $f_j$ are the local solution values and $\hat{\phi}_j(\hat(x))$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

      Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

      $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

      -

      which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

      +

      which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

      Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

      Definition at line 1335 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-12-27 18:25:07.844867147 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-12-27 18:25:07.852867202 +0000 @@ -163,20 +163,20 @@

      Detailed Description

      template<int dim, int spacedim>
      class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

      This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

      -

      The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

      -

      If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

      -

      If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

      -

      The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

      -

      $|\frac{\partial \psi}{\partial x_i}| > 0$.

      +

      The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

      +

      If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

      +

      If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

      +

      The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

      +

      $|\frac{\partial \psi}{\partial x_i}| > 0$.

      throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

      -

      $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

      +

      $H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

      so that

      -

      $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

      -

      over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

      -

      If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

      +

      $\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

      +

      over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will -refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

      +

      If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

      When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation in specialized class: QGenerator<1, spacedim>.

      As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

      -

      When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

      +

      When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction, which works for all those $\psi_i$ which are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exist, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrize the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these $I = \cup_j I_j$.

      As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction can not be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

      Since we can not split a box forever, there is an maximum number of allowed splits on the additional data struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.

      @@ -326,7 +326,7 @@
      -

      Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

      +

      Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

      Definition at line 1141 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:07.880867394 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:07.888867449 +0000 @@ -164,8 +164,8 @@

      Detailed Description

      template<int spacedim>
      class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

      The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

      -

      Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

      -

      If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

      +

      Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

      +

      If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

      Definition at line 1276 of file quadrature_generator.h.

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-12-27 18:25:07.912867614 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-12-27 18:25:07.916867641 +0000 @@ -134,19 +134,19 @@

      Detailed Description

      template<int dim>
      -class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

      Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

      -

      $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

      -

      the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

      +class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

      Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

      +

      $\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

      +

      the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

      -\[
+<picture><source srcset=\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
-\] +\]" src="form_2176.png"/>

      -

      Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

      -

      $S = \{x \in B : \psi(x) = 0 \}$.

      -

      Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

      +

      Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

      +

      $S = \{x \in B : \psi(x) = 0 \}$.

      +

      Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

      Definition at line 815 of file quadrature_generator.h.

      Member Function Documentation

      @@ -208,7 +208,7 @@
      -

      Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

      +

      Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

      Definition at line 835 of file quadrature_generator.h.

      @@ -227,7 +227,7 @@
      -

      Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

      +

      Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

      Definition at line 841 of file quadrature_generator.h.

      @@ -265,7 +265,7 @@
      -

      Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

      +

      Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

      Definition at line 853 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-12-27 18:25:07.940867806 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-12-27 18:25:07.948867861 +0000 @@ -135,7 +135,7 @@  

      Detailed Description

      -

      A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

      +

      A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[a, b]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

      The bounds on the functions values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots .

      Definition at line 664 of file quadrature_generator.h.

      @@ -181,7 +181,7 @@ std::vector< double > & roots&#href_anchor"memdoc"> -

      For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

      +

      For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 <...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

      Definition at line 533 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-12-27 18:25:07.972868026 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-12-27 18:25:07.976868053 +0000 @@ -144,13 +144,13 @@

      Detailed Description

      template<int dim, int spacedim>
      -class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

      This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

      -

      To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

      -

      For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

      -

      In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

      -

      When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

      -

      $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

      -

      where $i$ is the height function direction.

      +class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

      This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from an $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

      +

      To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and in the case $dim=spacedim$, points for the surface quadrature.

      +

      For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belong to different regions in the quadrature partitioning.

      +

      In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$ and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

      +

      When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

      +

      $w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

      +

      where $i$ is the height function direction.

      Definition at line 896 of file quadrature_generator.h.

      Constructor & Destructor Documentation

      @@ -210,7 +210,7 @@ QPartitioning< dim > & q_partitioning&#href_anchor"memdoc"> -

      Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

      +

      Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

      Definition at line 748 of file quadrature_generator.cc.

      @@ -283,7 +283,7 @@

      Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

      -

      This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

      +

      This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$

      Definition at line 804 of file quadrature_generator.cc.

      @@ -392,7 +392,7 @@
      -

      1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

      +

      1d-functions, that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

      Definition at line 966 of file quadrature_generator.h.

      @@ -446,7 +446,7 @@
      -

      The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

      +

      The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

      Definition at line 979 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-12-27 18:25:08.004868246 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-12-27 18:25:08.012868301 +0000 @@ -530,11 +530,11 @@

      A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian).

      The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

      -

      In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

      +

      In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
+F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

      Parameters
      - +
      current_uCurrent value of $u$
      current_uCurrent value of $u$
      @@ -562,7 +562,7 @@
      Parameters
      - +
      [in]rhsThe system right hand side to solve for.
      [out]dstThe solution of $J^{-1} * \texttt{src}$.
      [out]dstThe solution of $J^{-1} * \texttt{src}$.
      [in]toleranceThe tolerance with which to solve the linear system of equations.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-12-27 18:25:08.040868493 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-12-27 18:25:08.044868520 +0000 @@ -260,7 +260,7 @@ solver_typeNonlinear solver type. strategyMethod of solving the nonlinear problem. maximum_non_linear_iterationsMaximum number of nonlinear iterations. - function_toleranceAbsolute stopping tolerance for the norm of the residual $F(u)$. + function_toleranceAbsolute stopping tolerance for the norm of the residual $F(u)$. relative_toleranceRelative stopping tolerance. step_toleranceTolerance for minimum scaled step length anderson_subspace_sizeSize of the Anderson acceleration subspace, use 0 to disable. @@ -343,7 +343,7 @@
      -

      A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

      +

      A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

      If set to zero, default values will be used.

      Definition at line 177 of file nonlinear.h.

      @@ -363,7 +363,7 @@
      -

      Relative $l_2$ tolerance of the residual to be reached.

      +

      Relative $l_2$ tolerance of the residual to be reached.

      Note
      Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

      Definition at line 185 of file nonlinear.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-12-27 18:25:08.084868795 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-12-27 18:25:08.088868822 +0000 @@ -573,7 +573,7 @@
      -

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -603,24 +603,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -629,11 +629,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-12-27 18:25:08.132869125 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-12-27 18:25:08.136869152 +0000 @@ -487,7 +487,7 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
      Parameters
      @@ -496,7 +496,7 @@
      -
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
      +
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-12-27 18:25:08.180869454 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-12-27 18:25:08.184869482 +0000 @@ -448,7 +448,7 @@
      -

      Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function.

      Refer to the general documentation of this class for more information.

      @@ -637,7 +637,7 @@
      -

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -667,24 +667,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -693,11 +693,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-12-27 18:25:08.224869756 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-12-27 18:25:08.228869784 +0000 @@ -481,7 +481,7 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
      Parameters
      @@ -490,7 +490,7 @@
      -
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
      +
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-12-27 18:25:08.272870086 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-12-27 18:25:08.276870113 +0000 @@ -481,7 +481,7 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
      Parameters
      @@ -490,7 +490,7 @@
      -
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
      +
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-12-27 18:25:08.320870415 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-12-27 18:25:08.324870443 +0000 @@ -286,7 +286,7 @@

      Detailed Description

      template<typename VectorType>
      class PArpackSolver< VectorType >

      Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

      -

      In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

      +

      In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

      The ArpackSolver can be used in application codes in the following way:

      const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
      @@ -311,8 +311,8 @@
      const AdditionalData additional_data
      -

      for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

      -

      Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. Whereas in mode 2, OP is an inverse of M. Finally, mode 1 corresponds to standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

      +

      for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

      +

      Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. Whereas in mode 2, OP is an inverse of M. Finally, mode 1 corresponds to standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

      The OP can be specified by using a LinearOperator:

      const double shift = 5.0;
      const auto op_A = linear_operator<vector_t>(A);
      const auto op_B = linear_operator<vector_t>(B);
      @@ -645,7 +645,7 @@ const unsigned int n_eigenvalues&#href_anchor"memdoc"> -

      Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

      +

      Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

      In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

      Definition at line 769 of file parpack_solver.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-12-27 18:25:08.356870663 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-12-27 18:25:08.360870690 +0000 @@ -304,7 +304,7 @@ const MPI_Comm communicator&#href_anchor"memdoc">

      Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

      -

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      +

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.

      Definition at line 49 of file petsc_communication_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-12-27 18:25:08.416871075 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-12-27 18:25:08.420871103 +0000 @@ -1518,8 +1518,8 @@
      -

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1547,8 +1547,8 @@
      -

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -1604,7 +1604,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2351,7 +2351,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2385,8 +2385,8 @@
      -

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-12-27 18:25:08.504871680 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-12-27 18:25:08.496871625 +0000 @@ -888,7 +888,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Definition at line 408 of file petsc_block_sparse_matrix.h.

      @@ -1000,7 +1000,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 439 of file petsc_block_sparse_matrix.h.

      @@ -2050,7 +2050,7 @@
      -

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      +

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -2624,7 +2624,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2732,7 +2732,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:08.576872174 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:08.580872201 +0000 @@ -1956,7 +1956,7 @@
      -

      $U = U * V$: scalar product.

      +

      $U = U * V$: scalar product.

      @@ -1982,7 +1982,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -2034,7 +2034,7 @@
      -

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      +

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -2060,7 +2060,7 @@
      -

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      +

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -2086,7 +2086,7 @@
      -

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      +

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      @@ -2121,7 +2121,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2368,7 +2368,7 @@
      -

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      +

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-12-27 18:25:08.652872696 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-12-27 18:25:08.656872723 +0000 @@ -827,7 +827,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      @@ -850,7 +850,7 @@ const Vector & v&#href_anchor"memdoc"> -

      Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

      +

      Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Definition at line 814 of file petsc_parallel_sparse_matrix.cc.

      @@ -2072,8 +2072,8 @@
      -

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -2101,8 +2101,8 @@
      -

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -2158,7 +2158,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2905,7 +2905,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2939,8 +2939,8 @@
      -

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:08.724873190 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:08.728873218 +0000 @@ -1941,7 +1941,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      Definition at line 604 of file petsc_vector_base.cc.

      @@ -1997,7 +1997,7 @@
      -

      $l_1$-norm of the vector. The sum of the absolute values.

      +

      $l_1$-norm of the vector. The sum of the absolute values.

      Note
      In complex-valued PETSc priori to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

      Definition at line 664 of file petsc_vector_base.cc.

      @@ -2026,7 +2026,7 @@
      -

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      +

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      Definition at line 677 of file petsc_vector_base.cc.

      @@ -2054,7 +2054,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      Definition at line 690 of file petsc_vector_base.cc.

      @@ -2082,7 +2082,7 @@
      -

      $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

      +

      $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

      Definition at line 732 of file petsc_vector_base.cc.

      @@ -2119,7 +2119,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< PetscScalar > &values)

      The reason this function exists is for compatibility with deal.II's own vector classes which can implement this functionality with less memory transfer. However, for PETSc vectors such a combined operation is not natively supported and thus the cost is completely equivalent as calling the two methods separately.

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 529 of file petsc_vector_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-12-27 18:25:08.784873602 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-12-27 18:25:08.788873630 +0000 @@ -1303,8 +1303,8 @@
      -

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1324,8 +1324,8 @@
      -

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -1365,7 +1365,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -1972,7 +1972,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2006,8 +2006,8 @@
      -

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-12-27 18:25:08.864874151 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-12-27 18:25:08.868874179 +0000 @@ -1962,8 +1962,8 @@
      -

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1991,8 +1991,8 @@
      -

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      +

      Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -2048,7 +2048,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2675,7 +2675,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2709,8 +2709,8 @@
      -

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      -

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      +

      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      +

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-12-27 18:25:08.900874399 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-12-27 18:25:08.904874426 +0000 @@ -194,7 +194,7 @@
      Mat & petsc_matrix();
      ...

      In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

      -

      To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

      +

      To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.

      The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

      Users can also provide the implementations of the Jacobian. This can be accomplished in two ways:

      • PETSc style using NonlinearSolver::jacobian
      • /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-12-27 18:25:08.972874893 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-12-27 18:25:08.980874948 +0000 @@ -1952,8 +1952,8 @@
        -

        Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
-j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        +

        Return the l1-norm of the matrix, that is $|M|_1=max_{all columns
+j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        Definition at line 418 of file petsc_matrix_base.cc.

        @@ -1981,8 +1981,8 @@
        -

        Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
-i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        +

        Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows
+i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        Definition at line 431 of file petsc_matrix_base.cc.

        @@ -2038,7 +2038,7 @@
        -

        Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

        +

        Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

        Obviously, the matrix needs to be quadratic for this operation.

        The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

        Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

        @@ -2785,7 +2785,7 @@

        Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

        -

        This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        +

        This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

        Definition at line 644 of file petsc_matrix_base.cc.

        @@ -2819,8 +2819,8 @@
        -

        Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

        -

        This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        +

        Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

        +

        This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

        Definition at line 652 of file petsc_matrix_base.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-12-27 18:25:09.028875278 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-12-27 18:25:09.032875305 +0000 @@ -216,20 +216,20 @@
        template<typename VectorType = PETScWrappers::VectorBase, typename PMatrixType = PETScWrappers::MatrixBase, typename AMatrixType = PMatrixType>
        class PETScWrappers::TimeStepper< VectorType, PMatrixType, AMatrixType >

        Interface to the PETSc TS solver for Ordinary Differential Equations and Differential-Algebraic Equations. The TS solver is described in the PETSc manual. This class is used and extensively discussed in step-86.

        This class supports two kinds of formulations. The explicit formulation:

        -\[
+<picture><source srcset=\[
       \begin{cases}
           \dot y = G(t,y)\, , \\
           y(t_0) = y_0\, , \\
       \end{cases}
-    \] + \]" src="form_1821.png"/>

        and the implicit formulation:

        -\[
+<picture><source srcset=\[
       \begin{cases}
           F(t,y,\dot y) = 0\, , \\
           y(t_0) = y_0\, . \\
       \end{cases}
-    \] + \]" src="form_1822.png"/>

        The interface to PETSc is realized by means of std::function callbacks like in the SUNDIALS::IDA (which also solves implicit ODES) and SUNDIALS::ARKode classes (which solves a slightly generalized form of the explicit formulation above that also allows for a mass matrix on the left hand side).

        TimeStepper supports any vector and matrix type having constructors and methods:

        @@ -247,7 +247,7 @@
        Mat & petsc_matrix();
        ...

        In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

        -

        To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users have also the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

        +

        To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users have also the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

        The default linearization procedure of an implicit solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations that are ODE-solver specific. For details, consult the PETSc manual.

        Users can also provide the implementations of the Jacobians. This can be accomplished in two ways:

        Callback for the computation of the implicit Jacobian $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot
 y}$.

        -

        All implicit solvers implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        +

        All implicit solvers implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

        Definition at line 501 of file petsc_ts.h.

        @@ -788,7 +788,7 @@

        Callback for the set up of the Jacobian system.

        This callback gives full control to users to set up the linearized equations $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot
 y}$.

        -

        All implicit solvers implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        +

        All implicit solvers implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Solvers must be provided via TimeStepper::solve_with_jacobian.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-12-27 18:25:09.092875717 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-12-27 18:25:09.096875745 +0000 @@ -1169,7 +1169,7 @@
        -

        Return the square of the $l_2$-norm.

        +

        Return the square of the $l_2$-norm.

        Definition at line 604 of file petsc_vector_base.cc.

        @@ -1209,7 +1209,7 @@
        -

        $l_1$-norm of the vector. The sum of the absolute values.

        +

        $l_1$-norm of the vector. The sum of the absolute values.

        Note
        In complex-valued PETSc priori to 3.7.0 this norm is implemented as the sum of absolute values of real and imaginary parts of elements of a complex vector.

        Definition at line 664 of file petsc_vector_base.cc.

        @@ -1230,7 +1230,7 @@
        -

        $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

        +

        $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

        Definition at line 677 of file petsc_vector_base.cc.

        @@ -1250,7 +1250,7 @@
        -

        $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

        +

        $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

        Definition at line 690 of file petsc_vector_base.cc.

        @@ -1270,7 +1270,7 @@
        -

        $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

        +

        $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

        Definition at line 732 of file petsc_vector_base.cc.

        @@ -1300,7 +1300,7 @@
        return_value = *this * W;
        void add(const std::vector< size_type > &indices, const std::vector< PetscScalar > &values)

        The reason this function exists is for compatibility with deal.II's own vector classes which can implement this functionality with less memory transfer. However, for PETSc vectors such a combined operation is not natively supported and thus the cost is completely equivalent as calling the two methods separately.

        -

        For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

        +

        For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

        Definition at line 529 of file petsc_vector_base.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-12-27 18:25:09.244876761 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-12-27 18:25:09.248876788 +0000 @@ -1057,7 +1057,7 @@ const typename Triangulation< dim, spacedim >::active_cell_iterator & cell&#href_anchor"memdoc">

        Insert a particle into the collection of particles. Return an iterator to the new position of the particle. This function involves a copy of the particle and its properties. Note that this function is of $O(N \log
-N)$ complexity for $N$ particles.

        +N)$" src="form_2511.png"/> complexity for $N$ particles.

        Definition at line 578 of file particle_handler.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-12-27 18:25:09.284877035 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-12-27 18:25:09.292877090 +0000 @@ -677,7 +677,7 @@
        -

        This function makes sure that all internally stored memory blocks are sorted in the same order as one would loop over the handles_to_sort container. This makes sure memory access is contiguous with actual memory location. Because the ordering is given in the input argument the complexity of this function is $O(N)$ where $N$ is the number of elements in the input argument.

        +

        This function makes sure that all internally stored memory blocks are sorted in the same order as one would loop over the handles_to_sort container. This makes sure memory access is contiguous with actual memory location. Because the ordering is given in the input argument the complexity of this function is $O(N)$ where $N$ is the number of elements in the input argument.

        Definition at line 191 of file property_pool.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-12-27 18:25:09.468878299 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-12-27 18:25:09.472878326 +0000 @@ -2225,7 +2225,7 @@
        -

        Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

        +

        Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

        The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

        Note
        This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
        @@ -6947,7 +6947,7 @@
        -

        Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

        +

        Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

        Note
        The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-12-27 18:25:09.536878766 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-12-27 18:25:09.540878793 +0000 @@ -879,7 +879,7 @@
        -

        Return the Euclidean distance of this point to the point p, i.e. the $l_2$ norm of the difference between the vectors representing the two points.

        +

        Return the Euclidean distance of this point to the point p, i.e. the $l_2$ norm of the difference between the vectors representing the two points.

        Note
        This function can also be used in device code.
        @@ -1491,7 +1491,7 @@
        -

        Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

        +

        Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

        @@ -1517,7 +1517,7 @@
        -

        Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

        +

        Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

        @@ -2168,11 +2168,11 @@

        Entrywise multiplication of two tensor objects of general rank.

        This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

        -\[
+<picture><source srcset=\[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
-\] +\]" src="form_892.png"/>

        Template Parameters
        @@ -2207,17 +2207,17 @@
        -

        The dot product (single contraction) for tensors. This function return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

        -\[
+<p>The dot product (single contraction) for tensors. This function return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

        +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_863.png"/>

        Note
        For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
        -In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).
        +In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).
  • Definition at line 3039 of file tensor.h.

    @@ -2245,7 +2245,7 @@
    -

    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    +

    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3065 of file tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-12-27 18:25:09.584879095 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-12-27 18:25:09.592879150 +0000 @@ -458,7 +458,7 @@
    -

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    +

    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -716,7 +716,7 @@
    -

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    +

    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -748,24 +748,24 @@
    -

    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    -

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    +

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

    +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

    In image space, i.e., in the space in which we operate, this leads to the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

    -

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

    -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

    +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -774,11 +774,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-12-27 18:25:09.620879343 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-12-27 18:25:09.628879397 +0000 @@ -156,7 +156,7 @@
    template<int dim>
    class PolynomialsBernardiRaugel< dim >

    This class implements the Bernardi-Raugel polynomials similarly to the description in the Mathematics of Computation paper from 1985 by Christine Bernardi and Geneviève Raugel.

    The Bernardi-Raugel polynomials are originally defined as an enrichment of the $(P_1)^d$ elements on simplicial meshes for Stokes problems by the addition of bubble functions, yielding a locking-free finite element which is a subset of $(P_2)^d$ elements. This implementation is an enrichment of $(Q_1)^d$ elements which is a subset of $(Q_2)^d$ elements for quadrilateral and hexahedral meshes.

    -

    The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    +

    The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    2d bubble functions (in order)

    $x=0$ edge: $\mathbf{p}_1 = \mathbf{n}_1 (1-x)(y)(1-y)$

     @f$x=1@f$ edge: @f$\mathbf{p}_2 = \mathbf{n}_2 (x)(y)(1-y)@f$
     
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-12-27 18:25:09.664879645 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-12-27 18:25:09.664879645 +0000
    @@ -1245,7 +1245,7 @@
       
     
    x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2024-12-27 18:25:09.700879892 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2024-12-27 18:25:09.700879892 +0000 @@ -152,18 +152,18 @@

    This space is of the form Vk = RTk-1 + Bk, where Bk is defined as follows:

    In 2d:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
  B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2}\begin{pmatrix} (a_2+1) x \\
    -a_1 y \end{pmatrix}\text{ : } a_2=k \right\} \\
  B_k^2(E) = \text{span}\left\{x^{b_1} y^{b_2-1}\begin{pmatrix} -b_2 x \\
     (b_1+1) y \end{pmatrix}\text{ : } b_1=k \right\}
-\end{align*} +\end{align*}" src="form_718.png"/>

    In 3d:

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2} z^{a_3}\begin{pmatrix}
 (a_2+a_3+2) x \\
     -a_1 y \\ -a_1 z \end{pmatrix}\text{ : } a_2=k \text{ or } a_3=k
@@ -175,11 +175,11 @@
   B_k^3(E) = \text{span}\left\{x^{c_1}y^{c_2}z^{c_3-1}\begin{pmatrix} -c_3 x
 \\ -c_3y \\ (c_1+c_2+2)z \end{pmatrix}\text{ : } c_1=k \text{ or } c_2=k
 \right\},
- \end{align*} + \end{align*}" src="form_719.png"/>

    -

    where $0 \le a_1, a_2, a_3 \le k$.

    +

    where $0 \le a_1, a_2, a_3 \le k$.

    Note
    Unlike the classical Raviart-Thomas space, the lowest order for the enhanced space is 1, similarly to the Brezzi-Douglas-Marini (BDM) polynomial space.

    The total dimension of the space dim(Vk) = d*(k+1)^d, where d is the space dimension. This allows to associate shape functions with the Gauss-Lobatto quadrature points as shown in the figures below.

    @@ -190,7 +190,7 @@

    - +
    Left - $2d,\,k=3$, right - $3d,\,k=2$.
    Left - $2d,\,k=3$, right - $3d,\,k=2$.

    Definition at line 90 of file polynomials_rt_bubbles.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-12-27 18:25:09.744880194 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-12-27 18:25:09.748880221 +0000 @@ -1197,7 +1197,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-12-27 18:25:09.792880524 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-12-27 18:25:09.796880551 +0000 @@ -1213,7 +1213,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-12-27 18:25:09.840880853 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-12-27 18:25:09.840880853 +0000 @@ -1311,7 +1311,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-12-27 18:25:09.880881128 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-12-27 18:25:09.884881155 +0000 @@ -1226,7 +1226,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-12-27 18:25:09.932881485 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-12-27 18:25:09.928881457 +0000 @@ -1187,7 +1187,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-12-27 18:25:09.968881732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-12-27 18:25:09.972881760 +0000 @@ -232,7 +232,7 @@

    Detailed Description

    Lobatto polynomials of arbitrary degree on [0,1].

    -

    These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

    +

    These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

    Calling the constructor with a given index k will generate the polynomial with index k. But only for $k\geq 1$ the index equals the degree of the polynomial. For k==0 also a polynomial of degree 1 is generated.

    These polynomials are used for the construction of the shape functions of Nédélec elements of arbitrary order.

    @@ -1217,7 +1217,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-12-27 18:25:10.016882062 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-12-27 18:25:10.024882117 +0000 @@ -1290,7 +1290,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-12-27 18:25:10.068882419 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-12-27 18:25:10.072882446 +0000 @@ -1217,7 +1217,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-12-27 18:25:10.116882748 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-12-27 18:25:10.120882776 +0000 @@ -1210,7 +1210,7 @@
    -

    The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    +

    The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    Definition at line 131 of file polynomials_hermite.h.

    @@ -1336,7 +1336,7 @@
    -

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    +

    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-12-27 18:25:10.152882995 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-12-27 18:25:10.160883050 +0000 @@ -226,7 +226,7 @@ x^{n+1} = x^{n} + \alpha P^{-1} (b-Ax^n). \]" src="form_1830.png"/>

    -

    The relaxation parameter $\alpha$ has to be in the range:

    +

    The relaxation parameter $\alpha$ has to be in the range:

    \[
  0 < \alpha < \frac{2}{\lambda_{\max}(P^{-1}A)}.
 \] /usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-12-27 18:25:10.188883243 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-12-27 18:25:10.196883298 +0000 @@ -234,8 +234,8 @@ x = v_0 + B \hat x \]" src="form_783.png"/>

    -

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    -

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    +

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    +

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-12-27 18:25:10.220883462 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-12-27 18:25:10.220883462 +0000 @@ -122,7 +122,7 @@
    [in]verticesThe vertices of the simplex you wish to integrate on
     QGaussChebyshev (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
    -class QGaussChebyshev< dim >

    Gauss-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) =
+class QGaussChebyshev< dim ></div><p>Gauss-Chebyshev quadrature rules integrate the weighted product <picture><source srcset=$\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) =
 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.38

    Definition at line 558 of file quadrature_lib.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-12-27 18:25:10.240883600 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-12-27 18:25:10.244883627 +0000 @@ -123,7 +123,7 @@ class QGaussLobatto< dim >

    The Gauss-Lobatto family of quadrature rules for numerical integration.

    This modification of the Gauss quadrature uses the two interval end points as well. Being exact for polynomials of degree 2n-3, this formula is suboptimal by two degrees.

    The quadrature points are interval end points plus the roots of the derivative of the Legendre polynomial Pn-1 of degree n-1. The quadrature weights are 2/(n(n-1)(Pn-1(xi)2).

    -
    Note
    This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha
+<dl class=
    Note
    This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha
 = \beta = 0$) is a special case.
    See also
    http://en.wikipedia.org/wiki/Handbook_of_Mathematical_Functions
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-12-27 18:25:10.264883765 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-12-27 18:25:10.268883792 +0000 @@ -122,7 +122,7 @@
     QGaussLobattoChebyshev (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
    -class QGaussLobattoChebyshev< dim >

    Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    +class QGaussLobattoChebyshev< dim >

    Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    Definition at line 627 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-12-27 18:25:10.292883957 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-12-27 18:25:10.296883984 +0000 @@ -132,8 +132,8 @@
    &#href_anchor"memitem:a9a003e3342b551507a0bab3fee019e40" id="r_a9a003e3342b551507a0bab3fee019e40">static std::vector< double > get_quadrature_weights (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
    -class QGaussLog< dim >

    A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate $\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

    -

    Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1
+class QGaussLog< dim ></div><p>A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate <picture><source srcset=$\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

    +

    Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1
 f(x) \ln|x| dx = \sum_{i=0}^N w_i f(q_i)$. Setting the revert flag to true at construction time switches the weight from $\ln|x|$ to $\ln|1-x|$.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-12-27 18:25:10.320884149 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-12-27 18:25:10.324884177 +0000 @@ -128,15 +128,15 @@

    Detailed Description

    template<int dim>
    -class QGaussLogR< dim >

    A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

    -

    You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    +class QGaussLogR< dim >

    A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

    +

    You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    This quadrature formula is rather expensive, since it uses internally two Gauss quadrature formulas of order n to integrate the nonsingular part of the factor, and two GaussLog quadrature formulas to integrate on the separate segments $[0,x_0]$ and $[x_0,1]$. If the singularity is one of the extremes and the factor alpha is 1, then this quadrature is the same as QGaussLog.

    The last argument from the constructor allows you to use this quadrature rule in one of two possible ways:

    \[ \int_0^1 g(x) dx = \int_0^1 f(x)
 \ln\left(\frac{|x-x_0|}{\alpha}\right) dx = \sum_{i=0}^N w_i g(q_i) =
 \sum_{i=0}^N \bar{w}_i f(q_i) \]

    -

    Which one of the two sets of weights is provided, can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    +

    Which one of the two sets of weights is provided, can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    Notice that this quadrature rule is worthless if you try to use it for regular functions once you factored out the singularity.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-12-27 18:25:10.356884396 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-12-27 18:25:10.364884451 +0000 @@ -132,9 +132,9 @@ static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_size (const Point< dim > &singularity, const unsigned int n) &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
    -class QGaussOneOverR< dim >

    A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

    -

    This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

    -

    Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    +class QGaussOneOverR< dim >

    A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

    +

    This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

    +

    Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    Definition at line 356 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-12-27 18:25:10.388884616 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-12-27 18:25:10.392884643 +0000 @@ -139,7 +139,7 @@

    Detailed Description

    template<int dim>
    -class QGaussRadauChebyshev< dim >

    Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    +class QGaussRadauChebyshev< dim >

    Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    Definition at line 581 of file quadrature_lib.h.

    Member Enumeration Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-12-27 18:25:10.424884863 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-12-27 18:25:10.424884863 +0000 @@ -200,8 +200,8 @@ x = v_0 + B \hat x \]" src="form_783.png"/>

    -

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    -

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    +

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    +

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-12-27 18:25:10.452885056 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-12-27 18:25:10.456885084 +0000 @@ -315,7 +315,7 @@

    Remove first column and update QR factorization.

    Starting from the given QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in {\mathbb
 R}^m$.

    -

    The standard approach is to partition $R$ as

    +

    The standard approach is to partition $R$ as

    \[
 R =
 \begin{bmatrix}
@@ -368,7 +368,7 @@
   </tr>
 </table>
 </div><div class= -

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -402,7 +402,7 @@
    [in]verticesThe vertices of the simplex you wish to integrate on
    -

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -436,7 +436,7 @@
    -

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    +

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -470,7 +470,7 @@
    -

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    +

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-12-27 18:25:10.480885248 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-12-27 18:25:10.480885248 +0000 @@ -186,8 +186,8 @@ x = v_0 + B \hat x \]" src="form_783.png"/>

    -

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    -

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    +

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    +

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-12-27 18:25:10.512885468 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-12-27 18:25:10.512885468 +0000 @@ -148,7 +148,7 @@ \end{align*}" src="form_762.png"/>

    Since the library assumes $[0,1]$ as reference interval, we will map these values on the proper reference interval in the implementation.

    -

    This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$ is given at construction time, and is the location of the singularity $x_0$, and $f(x)$ is a smooth non singular function.

    +

    This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$ is given at construction time, and is the location of the singularity $x_0$, and $f(x)$ is a smooth non singular function.

    Singular quadrature formula are rather expensive, nevertheless Telles' quadrature formula are much easier to compute with respect to other singular integration techniques as Lachat-Watson.

    We have implemented the case for $dim = 1$. When we deal the case $dim >1$ we have computed the quadrature formula has a tensorial product of one dimensional Telles' quadrature formulas considering the different components of the singularity.

    The weights and functions for Gauss Legendre formula have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-12-27 18:25:10.532885605 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-12-27 18:25:10.540885660 +0000 @@ -223,8 +223,8 @@ x = v_0 + B \hat x \]" src="form_783.png"/>

    -

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    -

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    +

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    +

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    [in]verticesThe vertices of the simplex you wish to integrate on
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-12-27 18:25:10.560885798 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-12-27 18:25:10.564885825 +0000 @@ -124,7 +124,7 @@
    [in]verticesThe vertices of the simplex you wish to integrate on
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
    class QWitherdenVincentSimplex< dim >

    Witherden-Vincent rules for simplex entities.

    -

    Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., they integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.

    +

    Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., they integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.

    The given value for n_points_1d = 1, 2, 3, 4, 5, 6, 7 (where the last two are only implemented in 2d) results in the following number of quadrature points in 2d and 3d:

    • 2d: odd (default): 1, 6, 7, 15, 19, 28, 37
    • 2d: even: 3, 6, 12, 16, 25, 33, 42
    • @@ -202,8 +202,8 @@ x = v_0 + B \hat x \]" src="form_783.png"/>

      -

      where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

      -

      The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

      +

      where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

      +

      The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-12-27 18:25:10.612886155 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-12-27 18:25:10.620886210 +0000 @@ -242,9 +242,9 @@

      At least for quadrilaterals and hexahedra (or, more precisely, since we work on reference cells: for the unit square and the unit cube), quadrature formulas are typically tensor products of one-dimensional formulas (see also the section on implementation detail below).

      In order to allow for dimension independent programming, a quadrature formula of dimension zero exists. Since an integral over zero dimensions is the evaluation at a single point, any constructor of such a formula initializes to a single quadrature point with weight one. Access to the weight is possible, while access to the quadrature point is not permitted, since a Point of dimension zero contains no information. The main purpose of these formulae is their use in QProjector, which will create a useful formula of dimension one out of them.

      Mathematical background

      -

      For each quadrature formula we denote by m, the maximal degree of polynomials integrated exactly on the reference cell the quadrature formula corresponds to. This number is given in the documentation of each formula. The order of the integration error is m+1, that is, the error is the size of the cell to the m+1 by the Bramble-Hilbert Lemma. The number m is to be found in the documentation of each concrete formula. For the optimal formulae QGauss we have $m = 2N-1$, where $N$ is the constructor parameter to QGauss. The tensor product formulae are exact on tensor product polynomials of degree m in each space direction, but they are still only of (m+1)st order.

      +

      For each quadrature formula we denote by m, the maximal degree of polynomials integrated exactly on the reference cell the quadrature formula corresponds to. This number is given in the documentation of each formula. The order of the integration error is m+1, that is, the error is the size of the cell to the m+1 by the Bramble-Hilbert Lemma. The number m is to be found in the documentation of each concrete formula. For the optimal formulae QGauss we have $m = 2N-1$, where $N$ is the constructor parameter to QGauss. The tensor product formulae are exact on tensor product polynomials of degree m in each space direction, but they are still only of (m+1)st order.

      Tensor product quadrature

      -

      At least for hypercube reference cells (i.e., squares and cubes), most integration formulae in more than one space dimension are tensor products of quadrature formulae in one space dimension, or more generally the tensor product of a formula in (dim-1) dimensions and one in one dimension. There is a special constructor to generate a quadrature formula from two others. For example, the QGauss<dim> formulae include Ndim quadrature points in dim dimensions, where $N$ is the constructor parameter of QGauss.

      +

      At least for hypercube reference cells (i.e., squares and cubes), most integration formulae in more than one space dimension are tensor products of quadrature formulae in one space dimension, or more generally the tensor product of a formula in (dim-1) dimensions and one in one dimension. There is a special constructor to generate a quadrature formula from two others. For example, the QGauss<dim> formulae include Ndim quadrature points in dim dimensions, where $N$ is the constructor parameter of QGauss.

      Other uses of this class

      Quadrature objects are used in a number of places within deal.II where integration is performed, most notably via the FEValues and related classes. Some of these classes are also used in contexts where no integrals are involved, but where functions need to be evaluated at specific points, for example to evaluate the solution at individual points or to create graphical output. Examples are the implementation of VectorTools::point_value() and the DataOut and related classes (in particular in connection with the DataPostprocessor class). In such contexts, one often creates specific "Quadrature" objects in which the "quadrature points" are simply the points (in the coordinate system of the reference cell) at which one wants to evaluate the solution. In these kinds of cases, the weights stored by the current class are not used and the name "quadrature object" is interpreted as "list of evaluation points".

      /usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-12-27 18:25:10.684886649 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-12-27 18:25:10.688886677 +0000 @@ -499,7 +499,7 @@
      [in]verticesThe vertices of the simplex you wish to integrate on
    -

    Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    +

    Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2620 of file reference_cell.h.

    @@ -529,7 +529,7 @@
    -

    Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    +

    Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2709 of file reference_cell.h.

    @@ -1000,7 +1000,7 @@
    -

    Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be between in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be between in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    +

    Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be between in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be between in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    Definition at line 1878 of file reference_cell.h.

    @@ -1428,7 +1428,7 @@
    -

    Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    +

    Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    For ReferenceCells::Vertex, the reference cell is a zero-dimensional point in a zero-dimensional space. As a consequence, one cannot meaningfully define a volume for it. The function returns one for this case, because this makes it possible to define useful quadrature rules based on the center of a reference cell and its volume.

    Definition at line 2743 of file reference_cell.h.

    @@ -1460,9 +1460,9 @@

    Return the barycenter (i.e., the center of mass) of the reference cell that corresponds to the current object. The function is not called center() because one can define the center of an object in a number of different ways whereas the barycenter of a reference cell $K$ is unambiguously defined as

    -\[
+<picture><source srcset=\[
   \mathbf x_K = \frac{1}{V} \int_K \mathbf x \; dx
-\] +\]" src="form_1546.png"/>

    where $V$ is the volume of the reference cell (see also the volume() function).

    @@ -1494,7 +1494,7 @@
    -

    Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    +

    Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    The tolerance parameter may be less than zero, indicating that the point should be safely inside the cell.

    Definition at line 2807 of file reference_cell.h.

    @@ -1547,8 +1547,8 @@
    -

    Return $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

    -
    Precondition
    $i$ must be between zero and dim-1.
    +

    Return $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

    +
    Precondition
    $i$ must be between zero and dim-1.

    Definition at line 2916 of file reference_cell.h.

    @@ -2008,7 +2008,7 @@ const bool legacy_format&#href_anchor"memdoc"> -

    Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    +

    Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    The last argument, legacy_format, indicates whether to use the old, VTK legacy format (when true) or the new, VTU format (when false).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-12-27 18:25:10.716886869 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-12-27 18:25:10.716886869 +0000 @@ -544,7 +544,7 @@
    -

    Create and return a Teuchos smart reference counting pointer to the basis vector corresponding to the i ${}^{th}$ element of the wrapper vector.

    +

    Create and return a Teuchos smart reference counting pointer to the basis vector corresponding to the i ${}^{th}$ element of the wrapper vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-12-27 18:25:10.752887116 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-12-27 18:25:10.756887143 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-12-27 18:25:10.788887363 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-12-27 18:25:10.796887418 +0000 @@ -270,7 +270,7 @@ const unsigned int n_eigenpairs = 1&#href_anchor"memdoc"> -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -310,9 +310,9 @@ const unsigned int n_eigenpairs = 1&#href_anchor"memdoc"> -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -360,9 +360,9 @@ const unsigned int n_eigenpairs = 1&#href_anchor"memdoc"> -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -566,8 +566,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -610,8 +610,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -639,7 +639,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -667,8 +667,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-12-27 18:25:10.832887665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-12-27 18:25:10.836887693 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-12-27 18:25:10.876887967 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-12-27 18:25:10.880887995 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-12-27 18:25:10.916888242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-12-27 18:25:10.920888270 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-12-27 18:25:10.960888544 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-12-27 18:25:10.964888572 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-12-27 18:25:11.000888819 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-12-27 18:25:11.004888846 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-12-27 18:25:11.048889148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-12-27 18:25:11.052889176 +0000 @@ -265,7 +265,7 @@
    -

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    +

    Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

    @@ -312,9 +312,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$, for real matrices, vectors, and values $A, B, x,
+\lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
    -

    Same as above, but here a composite method for solving the system $A
-x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
-\lambda$.

    +

    Same as above, but here a composite method for solving the system $A
+x=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x,
+\lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
    -

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
    -

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
-\mathrm{n\_converged}-1$.

    +

    Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots
+\mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
    -

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    +

    Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve ())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
    -

    Same as above, but here initialize solver for the linear system $A
-x=\lambda B x$.

    +

    Same as above, but here initialize solver for the linear system $A
+x=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-12-27 18:25:11.100889506 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-12-27 18:25:11.100889506 +0000 @@ -211,85 +211,85 @@

    The class ARKode is a wrapper to SUNDIALS variable-step, embedded, additive Runge-Kutta solver which is a general purpose solver for systems of ordinary differential equations characterized by the presence of both fast and slow dynamics.

    Fast dynamics are treated implicitly, and slow dynamics are treated explicitly, using nested families of implicit and explicit Runge-Kutta solvers.

    Citing directly from ARKode documentation:

    -

    ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

    +

    ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

    -\[
+<picture><source srcset=\[
     M\dot y = f_E(t, y) + f_I (t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2645.png"/>

    -

    Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

    -

    For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    +

    Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

    +

    For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    The two right-hand side functions may be described as:

      -
    • $f_E(t, y)$: contains the "slow" time scale components of the system. This will be integrated using explicit methods.
    • -
    • $f_I(t, y)$: contains the "fast" time scale components of the system. This will be integrated using implicit methods.
    • +
    • $f_E(t, y)$: contains the "slow" time scale components of the system. This will be integrated using explicit methods.
    • +
    • $f_I(t, y)$: contains the "fast" time scale components of the system. This will be integrated using implicit methods.
    -

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    -

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    -

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

    +

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    +

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    +

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

    -\[
+<picture><source srcset=\[
     M\dot y = f_E(t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2655.png"/>

    -

    In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1,
-  2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

    -

    Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

    +

    In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1,
+  2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

    +

    Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

    -\[
+<picture><source srcset=\[
     M\dot y = f_I(t, y), \qquad y(t_0) = y_0.
-  \] + \]" src="form_2659.png"/>

    -

    Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    +

    Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    For both DIRK and ARK methods, an implicit system of the form

    -\[
+<picture><source srcset=\[
    G(z_i) \dealcoloneq M z_i - h_n A^I_{i,i} f_I (t^I_{n,i}, z_i) - a_i = 0
-  \] + \]" src="form_2662.png"/>

    -

    must be solved for each stage $z_i , i = 1, \ldots, s$, where we have the data

    -\[
+<p> must be solved for each stage <picture><source srcset=$z_i , i = 1, \ldots, s$, where we have the data

    +\[
    a_i \dealcoloneq
    M y_{n-1} + h_n \sum_{j=1}^{i-1} [ A^E_{i,j} f_E(t^E_{n,j}, z_j)
    + A^I_{i,j} f_I (t^I_{n,j}, z_j)]
-  \] + \]" src="form_2664.png"/>

    for the ARK methods, or

    -\[
+<picture><source srcset=\[
    a_i \dealcoloneq
    M y_{n-1} + h_n \sum_{j=1}^{i-1} A^I_{i,j} f_I (t^I_{n,j}, z_j)
-  \] + \]" src="form_2665.png"/>

    -

    for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher's tables for the chosen solver.

    -

    If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    +

    for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher's tables for the chosen solver.

    +

    If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    For systems of either type, ARKode allows a choice of solution strategy. The default solver choice is a variant of Newton's method,

    -\[
+<picture><source srcset=\[
    z_i^{m+1} = z_i^m +\delta^{m+1},
-  \] + \]" src="form_2669.png"/>

    -

    where $m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

    -\[
+<p> where <picture><source srcset=$m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

    +\[
    N(z_i^m) \delta^{m+1} = -G(z_i^m),
-  \] + \]" src="form_2671.png"/>

    where

    -\[
+<picture><source srcset=\[
   N \dealcoloneq M - \gamma J, \quad J
   \dealcoloneq \frac{\partial f_I}{\partial y},
   \qquad \gamma\dealcoloneq h_n A^I_{i,i}.
-  \] + \]" src="form_2672.png"/>

    -

    As an alternate to Newton's method, ARKode may solve for each stage $z_i ,i
-  = 1, \ldots , s$ using an Anderson-accelerated fixed point iteration

    -\[
+<p>As an alternate to Newton's method, <a class=ARKode may solve for each stage $z_i ,i
+  = 1, \ldots , s$ using an Anderson-accelerated fixed point iteration

    +\[
   z_i^{m+1} = g(z_i^{m}), m=0,1,\ldots.
-  \] + \]" src="form_2674.png"/>

    Unlike with Newton's method, this option does not require the solution of a linear system at each iteration, instead opting for solution of a low-dimensional least-squares solution to construct the nonlinear update.

    -

    Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, then jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

    -

    The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than their Newton counterparts. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

    -

    This improvement may be significant even for "small" values, e.g. $1 \leq
-  m_k \leq 5$, and convergence may not improve (or even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver for their cases when attempting a new problem.

    -

    For either the Newton or fixed-point solvers, it is well-known that both the efficiency and robustness of the algorithm intimately depends on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in ARKode documentation.

    +

    Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, then jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

    +

    The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than their Newton counterparts. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

    +

    This improvement may be significant even for "small" values, e.g. $1 \leq
+  m_k \leq 5$, and convergence may not improve (or even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver for their cases when attempting a new problem.

    +

    For either the Newton or fixed-point solvers, it is well-known that both the efficiency and robustness of the algorithm intimately depends on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in ARKode documentation.

    The user has to provide the implementation of at least one (or both) of the following std::functions:

    To provide a simple example, consider the harmonic oscillator problem:

    -\[
+<picture><source srcset=\[
   \begin{split}
     u'' & = -k^2 u \\
     u (0) & = 0 \\
     u'(0) & = k
   \end{split}
-  \] + \]" src="form_2682.png"/>

    We write it in terms of a first order ode:

    -\[
+<picture><source srcset=\[
   \begin{matrix}
     y_0' & =  y_1 \\
     y_1' & = - k^2 y_0
   \end{matrix}
-  \] + \]" src="form_2683.png"/>

    -

    That is $y' = A y$ where

    -\[
+<p>That is <picture><source srcset=$y' = A y$ where

    +\[
   A \dealcoloneq
   \begin{pmatrix}
   0 & 1 \\
   -k^2 &0
   \end{pmatrix}
-  \] + \]" src="form_2685.png"/>

    -

    and $y(0)=(0, k)^T$.

    -

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k
-*t)$, $y_1'(t) = -k^2 \sin(k t)$.

    +

    and $y(0)=(0, k)^T$.

    +

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k
+*t)$, $y_1'(t) = -k^2 \sin(k t)$.

    A minimal implementation, using only explicit RK methods, is given by the following code snippet:

    using VectorType = Vector<double>;
    @@ -733,8 +733,8 @@
    -

    A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t,
-y)$.

    +

    A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t,
+y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -755,8 +755,8 @@
    -

    A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t,
-y)$.

    +

    A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t,
+y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -778,7 +778,7 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-12-27 18:25:11.148889835 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-12-27 18:25:11.152889863 +0000 @@ -203,72 +203,72 @@

    Citing from the SUNDIALS documentation:

    Consider a system of Differential-Algebraic Equations written in the general form

    -\[
+<picture><source srcset=\[
    \begin{cases}
        F(t,y,\dot y) = 0\, , \\
        y(t_0) = y_0\, , \\
        \dot y (t_0) = \dot y_0\, .
    \end{cases}
- \] + \]" src="form_2701.png"/>

    -

    where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula), in fixed-leading-coefficient. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

    +

    where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula), in fixed-leading-coefficient. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

    -\[
+<picture><source srcset=\[
    \sum_{i=0}^q \alpha_{n,i}\,y_{n-i}=h_n\,\dot y_n\, ,
    \label{eq:bdf}
- \] + \]" src="form_2705.png"/>

    -

    where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$, and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

    +

    where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$, and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

    -\[
+<picture><source srcset=\[
    G(y_n)\equiv F\left(t_n,y_n,\dfrac{1}{h_n}\sum_{i=0}^q
   \alpha_{n,i}\,y_{n-i}\right)=0\, .
- \] + \]" src="form_2712.png"/>

    The Newton method leads to a linear system of the form

    -\[
+<picture><source srcset=\[
    J[y_{n(m+1)}-y_{n(m)}]=-G(y_{n(m)})\, ,
- \] + \]" src="form_2713.png"/>

    -

    where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

    +

    where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

    -\[
+<picture><source srcset=\[
    J=\dfrac{\partial G}{\partial y}
    = \dfrac{\partial F}{\partial y} +
      \alpha \dfrac{\partial F}{\partial \dot y}\, ,
- \] + \]" src="form_2715.png"/>

    -

    and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    +

    and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    A simple example: an ordinary differential equation

    To provide a simple example, consider the following harmonic oscillator problem:

    -\[ \begin{split}
+<picture><source srcset=\[ \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
- \] + \]" src="form_2717.png"/>

    We write it in terms of a first order ode:

    -\[
+<picture><source srcset=\[
  \begin{matrix}
    y_0' & -y_1      & = 0 \\
    y_1' & + k^2 y_0 & = 0
  \end{matrix}
- \] + \]" src="form_2718.png"/>

    -

    That is, $F(y', y, t) = y' + A y = 0 $ where

    -\[
+<p>That is, <picture><source srcset=$F(y', y, t) = y' + A y = 0 $ where

    +\[
  A =
  \begin{pmatrix}
  0 & -1 \\
  k^2 &0
  \end{pmatrix}
- \] + \]" src="form_2720.png"/>

    -

    and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

    -

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t)
- = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    -

    The Jacobian to assemble is the following: $J = \alpha I + A$.

    +

    and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

    +

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t)
+ = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    +

    The Jacobian to assemble is the following: $J = \alpha I + A$.

    This is achieved by the following snippet of code:

    using VectorType = Vector<double>;
    VectorType y(2);
    @@ -330,68 +330,68 @@
    std::function< void(VectorType &)> reinit_vector
    Definition ida.h:898

    A differential algebraic equation (DAE) example

    -

    A more interesting example is a situation where the form $F(y', y, t) = 0$ provides something genuinely more flexible than a typical ordinary differential equation. Specifically, consider the equation

    -\begin{align*}
+<p>A more interesting example is a situation where the form <picture><source srcset=$F(y', y, t) = 0$ provides something genuinely more flexible than a typical ordinary differential equation. Specifically, consider the equation

    +\begin{align*}
    u'(t) &= av(t),
    \\
    0 &= v(t) - u(t).
- \end{align*} + \end{align*}" src="form_2726.png"/>

    -

    One can combine the two variables into $y(t) = [u(t), v(t)]^T$. Here, one of the two variables does not have a time derivative. In applications, this is often the case when one variable evolves in time (here, $u(t)$) on its own time scale, and the other one finds its value as a function of the former on a much faster time scale. In the current context, we could of course easily eliminate $v(t)$ using the second equation, and would then just be left with the equation

    -\[
+<p> One can combine the two variables into <picture><source srcset=$y(t) = [u(t), v(t)]^T$. Here, one of the two variables does not have a time derivative. In applications, this is often the case when one variable evolves in time (here, $u(t)$) on its own time scale, and the other one finds its value as a function of the former on a much faster time scale. In the current context, we could of course easily eliminate $v(t)$ using the second equation, and would then just be left with the equation

    +\[
    u'(t) = au(t)
- \] + \]" src="form_2730.png"/>

    -

    which has solution $u(t) = u(0)e^{at}$. But this is, in general, not easily possible if the two variables are related by differential operators. In fact, this happens quite frequently in application. Take, for example, the time-dependent Stokes equations:

    -\begin{align*}
+<p> which has solution <picture><source srcset=$u(t) = u(0)e^{at}$. But this is, in general, not easily possible if the two variables are related by differential operators. In fact, this happens quite frequently in application. Take, for example, the time-dependent Stokes equations:

    +\begin{align*}
    \frac{\partial \mathbf u(\mathbf x,t)}{\partial t}
    - \nu \Delta \mathbf u(\mathbf x,t) + \nabla p(\mathbf x,t)
    &= \mathbf f(\mathbf x,t),
    \\
    \nabla \cdot \mathbf u(\mathbf x,t) &= 0.
- \end{align*} + \end{align*}" src="form_2732.png"/>

    -

    Here, the fluid velocity $\mathbf u(\mathbf x,t)$ evolves over time, and the pressure is always in equilibrium with the flow because the Stokes equations are derived under the assumption that the speed of sound (at which pressure perturbations propagate) is much larger than the fluid velocity. As a consequence, there is no time derivative on the pressure available in the equation, but unlike the simple model problem above, the pressure can not easily be eliminated from the system. Similar situations happen in step-21, step-31, step-32, step-43, and others, where a subset of variables is always in instantaneous equilibrium with another set of variables that evolves on a slower time scale.

    +

    Here, the fluid velocity $\mathbf u(\mathbf x,t)$ evolves over time, and the pressure is always in equilibrium with the flow because the Stokes equations are derived under the assumption that the speed of sound (at which pressure perturbations propagate) is much larger than the fluid velocity. As a consequence, there is no time derivative on the pressure available in the equation, but unlike the simple model problem above, the pressure can not easily be eliminated from the system. Similar situations happen in step-21, step-31, step-32, step-43, and others, where a subset of variables is always in instantaneous equilibrium with another set of variables that evolves on a slower time scale.

    Another case where we could eliminate a variable but do not want to is where that additional variable is introduced in the first place to work around some other problem. As an example, consider the time dependent version of the biharmonic problem we consider in step-47 (as well as some later ones). The equations we would then be interested in would read

    -\begin{align*}
+<picture><source srcset=\begin{align*}
    \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta^2 u(\mathbf x,t) &=
    f(\mathbf x,t).
- \end{align*} + \end{align*}" src="form_2734.png"/>

    -

    As discussed in step-47, the difficulty is the presence of the fourth derivatives. One way in which one can address this is by introducing an auxiliary variable $v=\Delta u$ which would render the problem into the following one that only ever has second derivatives which we know how to deal with:

    -\begin{align*}
+<p> As discussed in <a class=step-47, the difficulty is the presence of the fourth derivatives. One way in which one can address this is by introducing an auxiliary variable $v=\Delta u$ which would render the problem into the following one that only ever has second derivatives which we know how to deal with:

    +\begin{align*}
    \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta v(\mathbf x,t) &=
    f(\mathbf x,t),
    \\
    v(\mathbf x,t)-\Delta u(\mathbf x,t) &= 0.
- \end{align*} + \end{align*}" src="form_2736.png"/>

    -

    Here, the introduction of the additional variable was voluntary, and could be undone, but we don't want that of course. Rather, we end up with a differential-algebraic equation because the equations do not have a time derivative for $v$.

    -

    Rather than show how to solve the trivial (linear) case above, let us instead consider the situation where we introduce another variable $v$ that is related to $u$ by the nonlinear relationship $v=u^p$, $p\ge 1$:

    -\begin{align*}
+<p> Here, the introduction of the additional variable was voluntary, and could be undone, but we don't want that of course. Rather, we end up with a differential-algebraic equation because the equations do not have a time derivative for <picture><source srcset=$v$.

    +

    Rather than show how to solve the trivial (linear) case above, let us instead consider the situation where we introduce another variable $v$ that is related to $u$ by the nonlinear relationship $v=u^p$, $p\ge 1$:

    +\begin{align*}
    u'(t) &= a v(t)^{1/p},
    \\
    0 &= v(t) - u(t)^p.
- \end{align*} + \end{align*}" src="form_2739.png"/>

    We will impose initial conditions as

    -\begin{align*}
+<picture><source srcset=\begin{align*}
    u(0) &= 1 \\
    v(0) &= 1.
- \end{align*} + \end{align*}" src="form_2740.png"/>

    -

    The problem continues to have the solution $u(t)=e^{at}$ with the auxiliary variable satisfying $v(t)=[e^{at}]^p$. One would implement all of this using the following little program where you have to recall that

    -\[
+<p> The problem continues to have the solution <picture><source srcset=$u(t)=e^{at}$ with the auxiliary variable satisfying $v(t)=[e^{at}]^p$. One would implement all of this using the following little program where you have to recall that

    +\[
    F = \begin{pmatrix}u' -a v^{1/p} \\ -u^p + v \end{pmatrix}
- \] + \]" src="form_2743.png"/>

    and that the Jacobian we need to provide is

    -\[
+<picture><source srcset=\[
    J(\alpha) =
    = \dfrac{\partial F}{\partial y} +
      \alpha \dfrac{\partial F}{\partial \dot y}
    = \begin{pmatrix} \alpha && -av^{1/p-1}/p \\ -pu^{p-1} & 1 \end{pmatrix}
- \] + \]" src="form_2744.png"/>

    All of this can be implemented using the following code:

    const double a = 1.0;
    const double p = 1.5;
    @@ -447,30 +447,30 @@
    time_stepper.solve_dae(y, y_dot);
    std::function< void(const VectorType &rhs, VectorType &dst, const double tolerance)> solve_with_jacobian
    Definition ida.h:995
    ::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
    -

    Note that in this code, we not only provide initial conditions for $u$ and $v$, but also for $u'$ and $v'$. We can do this here because we know what the exact solution is.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-12-27 18:25:11.184890082 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-12-27 18:25:11.188890110 +0000 @@ -179,14 +179,14 @@
    -

    IDA is a Differential Algebraic solver. As such, it requires initial conditions also for the first order derivatives. If you do not provide consistent initial conditions, (i.e., conditions for which $F(\dot
-y(0), y(0), 0) = 0)$, you can ask SUNDIALS to compute initial conditions for you by specifying InitialConditionCorrection for the initial conditions both at the initial_time (ic_type) and after a reset has occurred (reset_type).

    +

    IDA is a Differential Algebraic solver. As such, it requires initial conditions also for the first order derivatives. If you do not provide consistent initial conditions, (i.e., conditions for which $F(\dot
+y(0), y(0), 0) = 0)$, you can ask SUNDIALS to compute initial conditions for you by specifying InitialConditionCorrection for the initial conditions both at the initial_time (ic_type) and after a reset has occurred (reset_type).

    - -
    Enumerator
    none&#href_anchor"fielddoc">

    Do not try to make initial conditions consistent.

    use_y_diff 

    Compute the algebraic components of $y$ and differential components of $\dot y$, given the differential components of $y$. This option requires that the user specifies differential and algebraic components in the function IDA::differential_components().

    +
    use_y_diff 

    Compute the algebraic components of $y$ and differential components of $\dot y$, given the differential components of $y$. This option requires that the user specifies differential and algebraic components in the function IDA::differential_components().

    use_y_dot 

    Compute all components of $y$, given $\dot y$.

    +
    use_y_dot 

    Compute all components of $y$, given $\dot y$.

    @@ -565,8 +565,8 @@

    Type of correction for initial conditions.

    -

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    -

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    +

    If you do not provide consistent initial conditions, (i.e., conditions for which $F(y_dot(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    +

    Notice that you could in principle use this capabilities to solve for steady state problems by setting y_dot to zero, and asking to compute $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver used inside IDA may not be robust enough for complex problems with several millions unknowns.

    Definition at line 775 of file ida.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-12-27 18:25:11.220890330 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-12-27 18:25:11.228890384 +0000 @@ -188,48 +188,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

    -

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
-= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    +

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u)
+= 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

      -
    • set $u_0$ = an initial guess
    • -
    • For $n = 0, 1, 2, \ldots$ until convergence do:
        -
      • Solve $J(u_n)\delta_n = -F(u_n)$
      • -
      • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
      • +
      • set $u_0$ = an initial guess
      • +
      • For $n = 0, 1, 2, \ldots$ until convergence do:
          +
        • Solve $J(u_n)\delta_n = -F(u_n)$
        • +
        • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
        • Test for convergence
      -

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      +

      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$, is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

      • the problem is initialized,
      • -
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • +
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$)
      • a specified number of nonlinear iterations have passed since the last update,
      • the linear solver failed recoverably with outdated Jacobian information,
      • the global strategy failed with outdated Jacobian information, or
      • -
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.
      • +
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq $ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

      -

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      +

      To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), that returns values $D_u$, which are diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), that supply values $D_F$, which are diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

      -

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      +

      Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96] , where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

        -
      • Set $u_0 =$ an initial guess
      • -
      • For $n = 0, 1, 2, \dots$ until convergence do:
          -
        • Set $u_{n+1} = G(u_n)$
        • +
        • Set $u_0 =$ an initial guess
        • +
        • For $n = 0, 1, 2, \dots$ until convergence do:
            +
          • Set $u_{n+1} = G(u_n)$
          • Test for convergence
        -

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        -

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        -

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        +

        At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

        +

        For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

        +

        Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
        -

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        +

        Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function(), a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        If the use of a Newton or Picard method is desired, then the user should also supply

        • solve_with_jacobian; and optionally
        • setup_jacobian;
        • @@ -430,7 +430,7 @@
    -

    A function object that users should supply and that is intended to compute the iteration function $G(u)$ for the fixed point iteration. This function is only used if the SolutionStrategy::fixed_point strategy is selected.

    +

    A function object that users should supply and that is intended to compute the iteration function $G(u)$ for the fixed point iteration. This function is only used if the SolutionStrategy::fixed_point strategy is selected.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 512 of file kinsol.h.

    @@ -452,14 +452,14 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

    -

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
-F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    +

    In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial
+F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    The setup_jacobian() function may call a user-supplied function, or a function within the linear solver group, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

    The point of this function is that setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

    Parameters
    - - + +
    current_uCurrent value of $u$
    current_fCurrent value of $F(u)$ or $G(u)$
    current_uCurrent value of $u$
    current_fCurrent value of $F(u)$ or $G(u)$
    @@ -484,12 +484,12 @@

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

    -

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    +

    A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

    Parameters
    - +
    [in]rhsThe system right hand side to solve for.
    [out]dstThe solution of $J^{-1} * src$.
    [out]dstThe solution of $J^{-1} * src$.
    [in]toleranceThe tolerance with which to solve the linear system of equations.
    @@ -514,7 +514,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    +

    The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values \_form#href_anchor".

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -536,7 +536,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

    -

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    +

    The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 674 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-12-27 18:25:11.256890577 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-12-27 18:25:11.264890632 +0000 @@ -433,7 +433,7 @@
    -

    A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

    +

    A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 366 of file kinsol.h.

    @@ -533,7 +533,7 @@
    -

    The relative error in computing $F(u)$, which is used in the difference quotient approximation to the Jacobian matrix when the user does not supply a solve_with_jacobian() function.

    +

    The relative error in computing $F(u)$, which is used in the difference quotient approximation to the Jacobian matrix when the user does not supply a solve_with_jacobian() function.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 410 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-12-27 18:25:11.352891236 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-12-27 18:25:11.352891236 +0000 @@ -373,15 +373,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

    -

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    -

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    +

    ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

    +

    In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
    -

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    +

    Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    The choice of the block sizes is a compromise between a sufficiently large size for efficient local/serial BLAS, but one that is also small enough to achieve good parallel load balance.

    Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets 2.20GHz, 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.

    @@ -626,7 +626,7 @@

    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 80 of file scalapack.cc.

    @@ -663,7 +663,7 @@

    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 105 of file scalapack.cc.

    @@ -701,7 +701,7 @@

    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

    Loads the matrix from file filename using HDF5. In case that deal.II was built without HDF5 a call to this function will cause an exception to be thrown.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 121 of file scalapack.cc.

    @@ -777,7 +777,7 @@

    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

    -

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 216 of file scalapack.cc.

    @@ -814,7 +814,7 @@

    Initialize the square matrix of size size and distributed using the grid process_grid.

    -

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    +

    The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 290 of file scalapack.cc.

    @@ -1058,9 +1058,9 @@
    -

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 980 of file scalapack.cc.

    @@ -1098,13 +1098,13 @@ transpose_B Block Sizes Operation -false $MB_A=MB_B$
    - $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ +false $MB_A=MB_B$
    + $NB_A=NB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$ -true $MB_A=NB_B$
    - $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ +true $MB_A=NB_B$
    + $NB_A=MB_B$ $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$ -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 990 of file scalapack.cc.

    @@ -1127,9 +1127,9 @@ const ScaLAPACKMatrix< NumberType > & B&#href_anchor"memdoc"> -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1046 of file scalapack.cc.

    @@ -1152,9 +1152,9 @@ const ScaLAPACKMatrix< NumberType > & B&#href_anchor"memdoc"> -

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    -

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    -

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    +

    Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

    +

    The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    +

    The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1056 of file scalapack.cc.

    @@ -1203,24 +1203,24 @@ transpose_A transpose_B Block Sizes Operation -false false $MB_A=MB_C$
    - $NB_A=MB_B$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ +false false $MB_A=MB_C$
    + $NB_A=MB_B$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$ -false true $MB_A=MB_C$
    - $NB_A=NB_B$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ +false true $MB_A=MB_C$
    + $NB_A=NB_B$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$ -true false $MB_A=MB_B$
    - $NB_A=MB_C$
    - $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ +true false $MB_A=MB_B$
    + $NB_A=MB_C$
    + $NB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$ -true true $MB_A=NB_B$
    - $NB_A=MB_C$
    - $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ +true true $MB_A=NB_B$
    + $NB_A=MB_C$
    + $MB_B=NB_C$ $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$ -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1066 of file scalapack.cc.

    @@ -1249,11 +1249,11 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1183 of file scalapack.cc.

    @@ -1282,11 +1282,11 @@ const bool adding = false&#href_anchor"memdoc">

    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

    -

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    -

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    -

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    -

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    -

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    +

    The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$.

    +

    if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

    +

    else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

    +

    It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

    +

    The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1197 of file scalapack.cc.

    @@ -1314,12 +1314,12 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-12-27 18:25:11.404891593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-12-27 18:25:11.408891620 +0000 @@ -272,7 +272,7 @@
    Vector<double> solution_1d;
    -

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;
    +

    We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

    DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

    AffineConstraints<double> boundary_values_2d;
    123,
    @@ -280,7 +280,7 @@
    boundary_values_2d);
    void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask={})
    -

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

    solution_1d_as_function_object (dof_handler_1d, solution_1d);
    auto boundary_evaluator
    = [&] (const Point<2> &p)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-12-27 18:25:11.448891895 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-12-27 18:25:11.456891950 +0000 @@ -379,8 +379,8 @@ \]" src="form_2508.png"/>

    starting from initial state x.

    -

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
-\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    +

    The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial
+\mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.

    @@ -401,7 +401,7 @@

    Connect a slot to perform a custom line-search.

    -

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    +

    Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-12-27 18:25:11.492892197 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-12-27 18:25:11.496892225 +0000 @@ -209,10 +209,10 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    -class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    +class SolverFIRE< VectorType >

    FIRE (Fast Inertial Relaxation Engine) for minimization of (potentially non-linear) objective function $E(\mathbf x)$, $\mathbf x$ is a vector of $n$ variables ( $n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain (nearest) configuration with least potential energy.

    Notation:

      -
    • The global vector of unknown variables: $\mathbf x$.
    • +
    • The global vector of unknown variables: $\mathbf x$.
    • Objective function: $E(\mathbf x)$.
    • Rate of change of unknowns: $\mathbf v$.
    • Gradient of the objective function w.r.t unknowns: $\mathbf g = \nabla E(\mathbf x)$.
    • @@ -220,15 +220,15 @@
    • Initial guess of unknowns: $\mathbf x_0$.
    • Time step: $\Delta t$.
    -

    Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

      +

      Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, FIRE algorithm is as follows,

      1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
      2. -
      3. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
        +
      4. Update $\mathbf x$ and $V$ using simple (forward) Euler integration step,
        $\mathbf x = \mathbf x + \Delta t \mathbf v$,
        $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
      5. Calculate $p = \mathbf g \cdot \mathbf v$.
      6. Set $\mathbf v = (1-\alpha) \mathbf v
                   + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
      7. -
      8. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
      9. +
      10. If $p<0$ and number of steps since $p$ was last negative is larger than certain value, then increase time step $\Delta t$ and decrease $\alpha$.
      11. If $p>0$, then decrease the time step, freeze the system i.e., $\mathbf v = \mathbf 0$ and reset $\alpha = \alpha_0$.
      12. Return to 1.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-12-27 18:25:11.528892444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-12-27 18:25:11.532892472 +0000 @@ -440,7 +440,7 @@ const PreconditionerType & preconditioner&#href_anchor"memdoc"> -

      Solve $A^Tx=b$ for $x$.

      +

      Solve $A^Tx=b$ for $x$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-12-27 18:25:11.576892774 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-12-27 18:25:11.580892801 +0000 @@ -590,7 +590,7 @@

    The solution will be returned in place of the right hand side vector.

    Parameters
    - +
    [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
    [in,out]rhs_and_solutionA vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
    [in]transposeIf set to true, this function solves the linear $A^T x = b$ instead of $Ax=b$.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-12-27 18:25:11.668893406 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-12-27 18:25:11.668893406 +0000 @@ -1822,7 +1822,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -2074,7 +2074,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2184,7 +2184,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2255,8 +2255,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2284,8 +2284,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-12-27 18:25:11.740893900 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-12-27 18:25:11.744893928 +0000 @@ -1625,7 +1625,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -1970,7 +1970,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2080,7 +2080,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2151,8 +2151,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2180,8 +2180,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-12-27 18:25:11.820894449 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-12-27 18:25:11.824894477 +0000 @@ -401,8 +401,8 @@
    template<typename number>
    class SparseMIC< number >

    Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

    The decomposition

    -

    Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
-- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    +

    Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D
+- L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    Definition at line 45 of file sparse_mic.h.

    Member Typedef Documentation

    @@ -1891,7 +1891,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -2143,7 +2143,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2253,7 +2253,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2324,8 +2324,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2353,8 +2353,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-12-27 18:25:11.908895054 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-12-27 18:25:11.916895109 +0000 @@ -1465,7 +1465,7 @@
    -

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    +

    Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by SparsityPattern::symmetrize().

    @@ -1825,7 +1825,7 @@
    -

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    +

    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -1914,7 +1914,7 @@ const bool rebuild_sparsity_pattern = true&#href_anchor"memdoc">

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

    -

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    +

    By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -1970,8 +1970,8 @@
    -

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
-columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    +

    Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -1991,8 +1991,8 @@
    -

    Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
-|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
+<p>Return the <picture><source srcset=$l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+|M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq
 |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-12-27 18:25:11.984895575 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-12-27 18:25:11.992895630 +0000 @@ -1219,7 +1219,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    +

    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    @@ -1242,7 +1242,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

    +

    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

    @@ -1265,7 +1265,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    +

    Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1288,7 +1288,7 @@ const Vector< somenumber > & src&#href_anchor"memdoc"> -

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

    +

    Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

    @@ -1397,7 +1397,7 @@ const number om = 1.&#href_anchor"memdoc"> -

    Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

    +

    Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

    @@ -1425,7 +1425,7 @@ const number om = 1.&#href_anchor"memdoc"> -

    Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    +

    Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    @@ -1460,7 +1460,7 @@
    -

    Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    +

    Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

    Definition at line 1463 of file sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-12-27 18:25:12.028895878 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-12-27 18:25:12.028895878 +0000 @@ -156,7 +156,7 @@

    The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

    The first template argument denotes the underlying numeric type, the second the constness of the matrix.

    Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

    -
    Note
    This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
    +
    Note
    This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 346 of file sparse_matrix.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-12-27 18:25:12.076896207 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-12-27 18:25:12.080896235 +0000 @@ -1174,7 +1174,7 @@
    -

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half +

    Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.

    Definition at line 673 of file sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-12-27 18:25:12.116896482 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-12-27 18:25:12.120896509 +0000 @@ -177,7 +177,7 @@

    Detailed Description

    An iterator class for walking over the elements of a sparsity pattern.

    The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

    -
    Note
    This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.
    +
    Note
    This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ row). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 279 of file sparsity_pattern.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-12-27 18:25:12.160896784 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-12-27 18:25:12.164896811 +0000 @@ -219,20 +219,20 @@ class SphericalManifold< dim, spacedim >

    Manifold description for a spherical space coordinate system.

    You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires addition steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

    The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

    -

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    +

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    These two points would be connected (using a PolarManifold) by the curve

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   s: [0,1]  & \rightarrow &  \mathbb S^3 \\
           t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
-\end{align*} +\end{align*}" src="form_1525.png"/>

    This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve, would be the one passing through the North pole:

    -\[
+<picture><source srcset=\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
-\] +\]" src="form_1526.png"/>

    -

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    +

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

    For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

    This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-12-27 18:25:12.184896949 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-12-27 18:25:12.188896976 +0000 @@ -287,7 +287,7 @@
    -

    Return a reference to the $i$th element of the range represented by the current object.

    +

    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-12-27 18:25:12.268897526 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-12-27 18:25:12.272897553 +0000 @@ -318,7 +318,7 @@ std::ostream &&#href_anchor"memTemplItemRight" valign="bottom">operator<< (std::ostream &out, const SymmetricTensor< 4, dim, Number > &t) &#href_anchor"details" id="details">

    Detailed Description

    template<int rank_, int dim, typename Number>
    -class SymmetricTensor< rank_, dim, Number >

    Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    +class SymmetricTensor< rank_, dim, Number >

    Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

    For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

    While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} =
@@ -647,7 +647,7 @@
   </tr>
 </table>
 </div><div class= -

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    +

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    @@ -909,8 +909,8 @@
    -

    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    -

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    +

    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    +

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

    It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

    To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.

    @@ -1254,7 +1254,7 @@
    -

    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    +

    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    @@ -1910,7 +1910,7 @@ \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$" src="form_829.png"/>.

    For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12
   \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right]
-  = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    + = A_{11} A_{22} - A_{12}^2$" src="form_830.png"/>. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2841 of file symmetric_tensor.h.

    @@ -1999,8 +1999,8 @@
    -

    Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    -

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
+<p>Return the eigenvalues of a symmetric <picture><source srcset=$2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    +

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm
 \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
    @@ -2602,7 +2602,7 @@
    -

    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

    +

    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

    Definition at line 3735 of file symmetric_tensor.h.

    @@ -2632,7 +2632,7 @@
    -

    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    +

    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3757 of file symmetric_tensor.h.

    @@ -2662,7 +2662,7 @@
    -

    Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    +

    Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3784 of file symmetric_tensor.h.

    @@ -2968,13 +2968,13 @@
    -

    The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

    -\[
+<p>The dot product (single contraction) for tensors: Return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

    +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_863.png"/>

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3006,13 +3006,13 @@
    -

    The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

    -\[
+<p>The dot product (single contraction) for tensors: Return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

    +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_863.png"/>

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3159,7 +3159,7 @@ Initial value:

    An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    +

    An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    Definition at line 743 of file symmetric_tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-12-27 18:25:12.320897883 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-12-27 18:25:12.324897910 +0000 @@ -245,7 +245,7 @@

    In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

    This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class where the number of elements are their location is known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run-time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.

    Dealing with large data sets

    -

    The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB if memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    +

    The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB if memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

    • You will have a lot of processes that are all trying to read from the same file at the same time.
    • In most cases, the data stored on every process is the same, and while every process needs to be able to read from a table, it is not necessary that every process stores its own table: All MPI processes that happen to be located on the same machine might as well store only one copy and make it available to each other via shared memory; in this model, only one MPI process per machine needs to store the data, and all other processes could then access it.
    • /usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-12-27 18:25:12.364898185 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-12-27 18:25:12.364898185 +0000 @@ -213,7 +213,7 @@

      Two (or more) columns may be merged into a "supercolumn" by twice (or multiple) calling add_column_to_supercolumn(), see there. Additionally there is a function to set for each column the precision of the output of numbers, and there are several functions to prescribe the format and the captions the columns are written with in tex mode.

      A detailed explanation of this class is also given in the step-13 tutorial program.

      Example

      -

      This is a simple example demonstrating the usage of this class. The first column includes the numbers $i=1 \dots n$, the second $1^2 \dots n^2$, the third $\sqrt{1}\dots\sqrt{n}$, where the second and third columns are merged into one supercolumn with the superkey squares and roots. Additionally the first column is aligned to the right (the default was centered) and the precision of the square roots are set to be 6 (instead of 4 as default).

      +

      This is a simple example demonstrating the usage of this class. The first column includes the numbers $i=1 \dots n$, the second $1^2 \dots n^2$, the third $\sqrt{1}\dots\sqrt{n}$, where the second and third columns are merged into one supercolumn with the superkey squares and roots. Additionally the first column is aligned to the right (the default was centered) and the precision of the square roots are set to be 6 (instead of 4 as default).

      for (unsigned int i = 1; i <= n; ++i)
      {
      @@ -244,9 +244,9 @@

      When generating output, TableHandler expects that all columns have the exact same number of elements in it so that the result is in fact a table. This assumes that in each of the iterations (time steps, nonlinear iterations, etc) you fill every single column. On the other hand, this may not always be what you want to do. For example, it could be that the function that computes the nonlinear residual is only called every few time steps; or, a function computing statistics of the mesh is only called whenever the mesh is in fact refined. In these cases, the add_value() function will be called less often for some columns and the column would therefore have fewer elements; furthermore, these elements would not be aligned with the rows that contain the other data elements that were produced during this iteration. An entirely different scenario is that the table is filled and at a later time we use the data in there to compute the elements of other rows; the ConvergenceTable class does something like this.

      To support both scenarios, the TableHandler class has a property called auto-fill mode. By default, auto-fill mode is off, but it can be enabled by calling set_auto_fill_mode(). If auto-fill mode is enabled we use the following algorithm:

        -
      • When calling add_value(key, value), we count the number of elements in the column corresponding to key. Let's call this number $m$.
      • -
      • We also determine the maximal number of elements in the other columns; call it $n$.
      • -
      • If $m < n-1$ then we add $n-m-1$ copies of the object T() to this column. Here, T is the data type of the given value. For example, if T is a numeric type, then T() is the number zero; if T is std::string, then T() is the empty string "".
      • +
      • When calling add_value(key, value), we count the number of elements in the column corresponding to key. Let's call this number $m$.
      • +
      • We also determine the maximal number of elements in the other columns; call it $n$.
      • +
      • If $m < n-1$ then we add $n-m-1$ copies of the object T() to this column. Here, T is the data type of the given value. For example, if T is a numeric type, then T() is the number zero; if T is std::string, then T() is the empty string "".
      • Add the given value to this column.

      Padding the column with default elements makes sure that after the addition the column has as many entries as the longest other column. In other words, if we have skipped previous invocations of add_value() for a given key, then the padding will enter default values into this column.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-12-27 18:25:12.428898624 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-12-27 18:25:12.432898652 +0000 @@ -281,13 +281,13 @@ class Tensor< rank_, dim, Number >

      A general tensor class with an arbitrary rank, i.e. with an arbitrary number of indices. The Tensor class provides an indexing operator and a bit of infrastructure, but most functionality is recursively handed down to tensors of rank 1 or put into external templated functions, e.g. the contract family.

      The rank of a tensor specifies which types of physical quantities it can represent:

      • -A rank-0 tensor is a scalar that can store quantities such as temperature or pressure. These scalar quantities are shown in this documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
      • +A rank-0 tensor is a scalar that can store quantities such as temperature or pressure. These scalar quantities are shown in this documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
      • -A rank-1 tensor is a vector with dim components and it can represent vector quantities such as velocity, displacement, electric field, etc. They can also describe the gradient of a scalar field. The notation used for rank-1 tensors is bold-faced lower-case Latin letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$. The components of a rank-1 tensor such as $\mathbf a$ are represented as $a_i$ where $i$ is an index between 0 and dim-1.
      • +A rank-1 tensor is a vector with dim components and it can represent vector quantities such as velocity, displacement, electric field, etc. They can also describe the gradient of a scalar field. The notation used for rank-1 tensors is bold-faced lower-case Latin letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$. The components of a rank-1 tensor such as $\mathbf a$ are represented as $a_i$ where $i$ is an index between 0 and dim-1.
      • -A rank-2 tensor is a linear operator that can transform a vector into another vector. These tensors are similar to matrices with $\text{dim} \times \text{dim}$ components. There is a related class SymmetricTensor<2,dim> for tensors of rank 2 whose elements are symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$. The components of a rank 2 tensor such as $\mathbf A$ are shown with two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the gradients of vector fields (deformation gradient, velocity gradient, etc.) or Hessians of scalar fields. Additionally, mechanical stress tensors are rank-2 tensors that map the unit normal vectors of internal surfaces into local traction (force per unit area) vectors.
      • +A rank-2 tensor is a linear operator that can transform a vector into another vector. These tensors are similar to matrices with $\text{dim} \times \text{dim}$ components. There is a related class SymmetricTensor<2,dim> for tensors of rank 2 whose elements are symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$. The components of a rank 2 tensor such as $\mathbf A$ are shown with two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the gradients of vector fields (deformation gradient, velocity gradient, etc.) or Hessians of scalar fields. Additionally, mechanical stress tensors are rank-2 tensors that map the unit normal vectors of internal surfaces into local traction (force per unit area) vectors.
      • -Tensors with ranks higher than 2 are similarly defined in a consistent manner. They have $\text{dim}^{\text{rank}}$ components and the number of indices required to identify a component equals rank. For rank-4 tensors, a symmetric variant called SymmetricTensor<4,dim> exists.
      • +Tensors with ranks higher than 2 are similarly defined in a consistent manner. They have $\text{dim}^{\text{rank}}$ components and the number of indices required to identify a component equals rank. For rank-4 tensors, a symmetric variant called SymmetricTensor<4,dim> exists.

      Using this tensor class for objects of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It also makes the code easier to read because of the semantic difference between a tensor (an object that relates to a coordinate system and has transformation properties with regard to coordinate rotations and transforms) and matrices (which we consider as operators on arbitrary vector spaces related to linear algebra things).

      Template Parameters
      @@ -1227,7 +1227,7 @@
      -

      Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

      +

      Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

      @@ -1255,7 +1255,7 @@
      -

      Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

      +

      Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

      @@ -1891,11 +1891,11 @@

      Entrywise multiplication of two tensor objects of general rank.

      This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

      -\[
+<picture><source srcset=\[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
-\] +\]" src="form_892.png"/>

      Template Parameters
      @@ -1934,17 +1934,17 @@
      -

      The dot product (single contraction) for tensors. This function return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      -\[
+<p>The dot product (single contraction) for tensors. This function return a tensor of rank <picture><source srcset=$(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

      +\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
-\] +\]" src="form_863.png"/>

      Note
      For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
      -In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).
      +In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type. Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

      Definition at line 3039 of file tensor.h.

      @@ -1974,7 +1974,7 @@
      -

      Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

      +

      Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

      Definition at line 3065 of file tensor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-12-27 18:25:12.484899009 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-12-27 18:25:12.488899036 +0000 @@ -233,7 +233,7 @@

      Detailed Description

      template<int dim, int dim_A, int spacedim_A, int chartdim_A, int dim_B, int spacedim_B, int chartdim_B>
      class TensorProductManifold< dim, dim_A, spacedim_A, chartdim_A, dim_B, spacedim_B, chartdim_B >

      Tensor product manifold of two ChartManifolds.

      -

      This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

      +

      This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

      An example usage would be the combination of a SphericalManifold with space dimension 2 and a FlatManifold with space dimension 1 to form a cylindrical manifold.

      pull_back(), push_forward(), and push_forward_gradient() are implemented by splitting the input argument into inputs for A and B according to the given dimensions and applying the corresponding operations before concatenating the result.

      Note
      The dimension arguments dim_A and dim_B are not used.
      @@ -605,24 +605,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -631,11 +631,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-12-27 18:25:12.524899283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-12-27 18:25:12.528899311 +0000 @@ -174,7 +174,7 @@ M_1 \otimes A_0 \end{align*}" src="form_2024.png"/>

      -

      in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

      +

      in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

      This class implements two basic operations, namely the usual multiplication by a vector and the inverse. For both operations, fast tensorial techniques can be applied that implement the operator evaluation in $\text{size}(M)^{d+1}$ arithmetic operations, considerably less than $\text{size}(M)^{2d}$ for the naive forward transformation and $\text{size}(M)^{3d}$ for setting up the inverse of $L$.

      Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to 1964's work by Lynch et al. [Lynch1964],

      \begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html	2024-12-27 18:25:12.584899695 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html	2024-12-27 18:25:12.580899668 +0000
@@ -236,7 +236,7 @@
 </table>
 <a name=

      Detailed Description

      template<int dim>
      -class TorusManifold< dim >

      Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $phi$ around the y axis, the angle $theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

      +class TorusManifold< dim >

      Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $phi$ around the y axis, the angle $theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

      This class was developed to be used in conjunction with GridGenerator::torus.

      Definition at line 861 of file manifold_lib.h.

      @@ -702,7 +702,7 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      -

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      +

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -732,24 +732,24 @@
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      -

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      -\begin{align*}
+<p>Return a vector that, at <picture><source srcset=$\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      +

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

      +\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
-\end{align*} +\end{align*}" src="form_1516.png"/>

      In image space, i.e., in the space in which we operate, this leads to the curve

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
-\end{align*} +\end{align*}" src="form_1517.png"/>

      -

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

      -\begin{align*}
+<p> What the current function is supposed to return is <picture><source srcset=$\mathbf s'(0)$. By the chain rule, this is equal to

      +\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
@@ -758,11 +758,11 @@
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
-\end{align*} +\end{align*}" src="form_1518.png"/>

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      -

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      +

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-12-27 18:25:12.628899997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-12-27 18:25:12.636900052 +0000 @@ -221,12 +221,12 @@
      template<int dim, int spacedim = dim>
      class TransfiniteInterpolationManifold< dim, spacedim >

      A mapping class that extends curved boundary descriptions into the interior of the computational domain. The outer curved boundary description is assumed to be given by another manifold (e.g. a polar manifold on a circle). The mechanism to extend the boundary information is a so-called transfinite interpolation. The use of this class is discussed extensively in step-65.

      The formula for extending such a description in 2d is, for example, described on Wikipedia. Given a point $(u,v)$ on the chart, the image of this point in real space is given by

      -\begin{align*}
+<picture><source srcset=\begin{align*}
 \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v)
 + u \mathbf c_3(v) \\
 &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf
 x_2 + uv \mathbf x_3 \right]
-\end{align*} +\end{align*}" src="form_1537.png"/>

      where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four bounding vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell. If a curved manifold is attached to any of these lines, the evaluation is done according to Manifold::get_new_point() with the two end points of the line and appropriate weight. In 3d, the generalization of this formula is implemented, creating a weighted sum of the vertices (positive contribution), the lines (negative), and the faces (positive contribution).

      This manifold is usually attached to a coarse mesh and then places new points as a combination of the descriptions on the boundaries, weighted appropriately according to the position of the point in the original chart coordinates $(u,v)$. This manifold should be preferred over setting only a curved manifold on the boundary of a mesh in most situations as it yields more uniform mesh distributions as the mesh is refined because it switches from a curved description to a straight description over all children of the initial coarse cell this manifold was attached to. This way, the curved nature of the manifold that is originally contained in one coarse mesh layer will be applied to more than one fine mesh layer once the mesh gets refined. Note that the mechanisms of TransfiniteInterpolationManifold are also built into the MappingQ class when only a surface of a cell is subject to a curved description, ensuring that even the default case without this manifold gets optimal convergence rates when applying curved boundary descriptions.

      @@ -932,11 +932,11 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      -

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
-x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      -

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      -

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
-s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      +

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf
+x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), then the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      +

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      +

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf
+s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-12-27 18:25:12.704900519 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-12-27 18:25:12.708900547 +0000 @@ -338,7 +338,7 @@
      x1The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Detailed Description

      template<int structdim, int dim, int spacedim>
      -class TriaAccessor< structdim, dim, spacedim >

      A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in $R^{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< structdim, dim, spacedim >

      A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in $R^{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a specialization of this class for the case where structdim equals zero, i.e., for vertices of a triangulation.

      Definition at line 756 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-12-27 18:25:12.760900904 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-12-27 18:25:12.760900904 +0000 @@ -291,7 +291,7 @@ &#href_anchor"memitem:a34cceffc302e3c23552635478b9fc983" id="r_a34cceffc302e3c23552635478b9fc983">static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_index (const unsigned int i) &#href_anchor"details" id="details">

      Detailed Description

      template<int spacedim>
      -class TriaAccessor< 0, 1, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< 0, 1, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

      The current specialization of the TriaAccessor<0,dim,spacedim> class for vertices of a one-dimensional triangulation exists since in the dim == 1 case vertices are also faces.

      Definition at line 2319 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:25:12.808901233 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:25:12.808901233 +0000 @@ -242,7 +242,7 @@ &#href_anchor"memitem:abda88195917e4d56f80eab016f21bde3" id="r_abda88195917e4d56f80eab016f21bde3">static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_index (const unsigned int i) &#href_anchor"details" id="details">

      Detailed Description

      template<int dim, int spacedim>
      -class TriaAccessor< 0, dim, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      +class TriaAccessor< 0, dim, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a further specialization of this class for the case that dim equals one, i.e., for vertices of a one-dimensional triangulation, since in that case vertices are also faces.

      Definition at line 1907 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-12-27 18:25:12.960902277 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-12-27 18:25:12.968902332 +0000 @@ -1905,7 +1905,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -4880,7 +4880,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:13.052902909 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:13.060902964 +0000 @@ -1016,7 +1016,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      Definition at line 443 of file trilinos_block_sparse_matrix.h.

      @@ -1046,7 +1046,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 456 of file trilinos_block_sparse_matrix.h.

      @@ -2132,7 +2132,7 @@
      -

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      +

      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -2634,7 +2634,7 @@
      -

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      +

      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2742,7 +2742,7 @@
      -

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      +

      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:13.124903403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:13.128903431 +0000 @@ -1739,7 +1739,7 @@
      -

      $U = U * V$: scalar product.

      +

      $U = U * V$: scalar product.

      @@ -1765,7 +1765,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -1817,7 +1817,7 @@
      -

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      +

      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -1843,7 +1843,7 @@
      -

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      +

      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -1869,7 +1869,7 @@
      -

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      +

      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      @@ -1904,7 +1904,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2151,7 +2151,7 @@
      -

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      +

      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:13.200903925 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:13.208903980 +0000 @@ -1323,7 +1323,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      @@ -1395,7 +1395,7 @@
      -

      $l_1$-norm of the vector. The sum of the absolute values.

      +

      $l_1$-norm of the vector. The sum of the absolute values.

      @@ -1413,7 +1413,7 @@
      -

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      +

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      @@ -1431,7 +1431,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      @@ -1477,7 +1477,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< TrilinosScalar > &values)

      The reason this function exists is for compatibility with deal.II's own vector classes which can implement this functionality with less memory transfer. However, for Trilinos vectors such a combined operation is not natively supported and thus the cost is completely equivalent as calling the two methods separately.

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-12-27 18:25:13.236904172 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-12-27 18:25:13.244904227 +0000 @@ -319,7 +319,7 @@
      -

      A function object that users should supply and that is intended to compute the residual $F(u)$.

      +

      A function object that users should supply and that is intended to compute the residual $F(u)$.

      Note
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors for this callback, so if it throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.

      Definition at line 204 of file nox.h.

      @@ -381,7 +381,7 @@
      -

      A user function that applies the Jacobian $\nabla_u F(u)$ to x and writes the result in y. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the Jacobian $\nabla_u F(u)$ to x and writes the result in y. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations. For instance, this function is required if the polynomial line search (NOX::LineSearch::Polynomial) is chosen, whereas for the full step case (NOX::LineSearch::FullStep) it won't be called.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors for this callback, so if it throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.
      @@ -403,7 +403,7 @@
      -

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y and writes the result in x. The parameter tolerance specifies the error reduction if an iterative solver is used in applying the inverse matrix. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y and writes the result in x. The parameter tolerance specifies the error reduction if an iterative solver is used in applying the inverse matrix. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can deal with "recoverable" errors for this callback, if the NOX parameter "Newton/Rescue Bad Newton Solve" is set to true (which is, in fact, its default value). If this parameters is set to true, then exceptions of type RecoverableUserCallbackError are eaten for this callback and NOX can safely proceed with a recovery step. Exceptions of other types are still treated as "irrecoverable".
      @@ -425,7 +425,7 @@
      -

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y, writes the result in x and returns the number of linear iterations the linear solver needed. The parameter tolerance species the error reduction if an iterative solver is used. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      +

      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y, writes the result in x and returns the number of linear iterations the linear solver needed. The parameter tolerance species the error reduction if an iterative solver is used. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is used if solve_with_jacobian is not provided. Its return value is compared again AdditionalFlags::threshold_n_linear_iterations; if it is larger, the preconditioner will be built before the next linear system is solved. The use of this approach is predicated on the idea that one can keep using a preconditioner built earlier as long as it is a good preconditioner for the matrix currently in use – where "good" is defined as leading to a number of iterations to solve linear systems less than the threshold given by the current variable.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can deal with "recoverable" errors for this callback, if the NOX parameter "Newton/Rescue Bad Newton Solve" is set to true (which is, in fact, its default value). If this parameters is set to true, then exceptions of type RecoverableUserCallbackError are eaten for this callback and NOX can safely proceed with a recovery step. Exceptions of other types are still treated as "irrecoverable".
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-12-27 18:25:13.320904749 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-12-27 18:25:13.324904777 +0000 @@ -2108,7 +2108,7 @@
      -

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      +

      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector has to be initialized with the same IndexSet the matrix was initialized with.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-12-27 18:25:13.384905189 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-12-27 18:25:13.392905243 +0000 @@ -467,7 +467,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      It is possible to specify the number of columns entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one does usually not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 100 of file trilinos_sparsity_pattern.cc.

      @@ -497,7 +497,7 @@
      -

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      +

      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      The vector n_entries_per_row specifies the number of entries in each row (an information usually not available, though).

      Definition at line 109 of file trilinos_sparsity_pattern.cc.

      @@ -769,7 +769,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The number of columns entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 214 of file trilinos_sparsity_pattern.cc.

      @@ -799,7 +799,7 @@
      -

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      +

      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The vector n_entries_per_row specifies the number of entries in each row.

      Definition at line 227 of file trilinos_sparsity_pattern.cc.

      @@ -1300,7 +1300,7 @@
      -

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      +

      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      Definition at line 878 of file trilinos_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-12-27 18:25:13.448905628 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-12-27 18:25:13.448905628 +0000 @@ -323,7 +323,7 @@ const MPI_Comm communicator&#href_anchor"memdoc">

      Constructor that takes the number of locally-owned degrees of freedom local_size and the number of ghost degrees of freedom ghost_size.

      -

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      +

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      Note
      Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation as and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-12-27 18:25:13.480905848 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-12-27 18:25:13.480905848 +0000 @@ -238,8 +238,8 @@ const unsigned int column_block_size&#href_anchor"memdoc">

      Constructor for a process grid for a given mpi_communicator. In this case the process grid is heuristically chosen based on the dimensions and block-cyclic distribution of a target matrix provided in n_rows_matrix, n_columns_matrix, row_block_size and column_block_size.

      -

      The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between number of process row $p$ and columns $q$ to be equal the ratio between matrix dimensions $M$ and $N$.

      -

      For example, a square matrix $640x640$ with the block size $32$ and the mpi_communicator with 11 cores will result in the $3x3$ process grid.

      +

      The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between number of process row $p$ and columns $q$ to be equal the ratio between matrix dimensions $M$ and $N$.

      +

      For example, a square matrix $640x640$ with the block size $32$ and the mpi_communicator with 11 cores will result in the $3x3$ process grid.

      Definition at line 208 of file process_grid.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-12-27 18:25:13.600906672 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-12-27 18:25:13.604906699 +0000 @@ -1324,7 +1324,7 @@

      Return the scalar product of two vectors. The return type is the underlying type of this vector, so the return type and the accuracy with which it the result is computed depend on the order of the arguments of this vector.

      -

      For complex vectors, the scalar product is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex vectors, the scalar product is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1345,7 +1345,7 @@
      -

      Return the square of the $l_2$-norm.

      +

      Return the square of the $l_2$-norm.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1387,7 +1387,7 @@
      -

      $l_1$-norm of the vector. The sum of the absolute values.

      +

      $l_1$-norm of the vector. The sum of the absolute values.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1408,7 +1408,7 @@
      -

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      +

      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1429,7 +1429,7 @@
      -

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      +

      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1480,7 +1480,7 @@
      return_value = *this * W;
      void add(const std::vector< size_type > &indices, const std::vector< OtherNumber > &values)

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      -

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      +

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 2024-12-27 18:25:13.660907084 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 2024-12-27 18:25:13.660907084 +0000 @@ -235,7 +235,7 @@
      component_mask(&one, 1, 3);
      Definition point.h:111
      -

      Here, component_mask then represents a Function object that for every point returns the vector $(0, 1, 0)^T$, i.e. a mask function that could, for example, be passed to VectorTools::integrate_difference(). This effect can also be achieved using the ComponentSelectFunction class but is obviously easily extended to functions that are non-constant in their one component.

      +

      Here, component_mask then represents a Function object that for every point returns the vector $(0, 1, 0)^T$, i.e. a mask function that could, for example, be passed to VectorTools::integrate_difference(). This effect can also be achieved using the ComponentSelectFunction class but is obviously easily extended to functions that are non-constant in their one component.

      Definition at line 869 of file function.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-12-27 18:25:13.720907496 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-12-27 18:25:13.724907523 +0000 @@ -1336,7 +1336,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      @@ -1432,7 +1432,7 @@

      Given a component mask (see this glossary entry ), produce a block mask (see this glossary entry ) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompasses complete blocks. In other words, if, for example, you pass an component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-12-27 18:25:13.748907688 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-12-27 18:25:13.752907715 +0000 @@ -179,7 +179,7 @@
      Parameters
      - +
      real_support_pointsThe position of the mapping support points in real space, queried by MappingQ::compute_mapping_support_points().
      unit_support_pointsThe location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
      unit_support_pointsThe location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-12-27 18:25:13.900908732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-12-27 18:25:13.908908787 +0000 @@ -1857,7 +1857,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2424,7 +2424,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -7022,7 +7022,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-12-27 18:25:14.068909885 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-12-27 18:25:14.072909913 +0000 @@ -1766,7 +1766,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2380,7 +2380,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -6999,7 +6999,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-12-27 18:25:14.244911094 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-12-27 18:25:14.248911121 +0000 @@ -2077,7 +2077,7 @@
      -

      Return a permutation vector for the order the coarse cells are handed off to p4est. For example the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

      +

      Return a permutation vector for the order the coarse cells are handed off to p4est. For example the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

      Definition at line 3696 of file tria.cc.

      @@ -2961,7 +2961,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -3420,7 +3420,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -7753,7 +7753,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:14.420912302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:14.428912357 +0000 @@ -2242,7 +2242,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2813,7 +2813,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -7298,7 +7298,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-12-27 18:25:14.596913511 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-12-27 18:25:14.604913566 +0000 @@ -2392,7 +2392,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2896,7 +2896,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -7230,7 +7230,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-12-27 18:25:14.768914692 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-12-27 18:25:14.768914692 +0000 @@ -2178,7 +2178,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      -

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      +

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that that part of the information that has to be send between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2666,7 +2666,7 @@
      -

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      +

      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
      @@ -7199,7 +7199,7 @@
      -

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      +

      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are no cells, i.e. for $dim>2$ no level argument must be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-12-27 18:25:14.812914994 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-12-27 18:25:14.816915021 +0000 @@ -147,9 +147,9 @@
      Member DoFTools::map_dofs_to_support_points (const hp::MappingCollection< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask={})
      Use the function that returns the std::map instead.
      Member FEEvaluationData< dim, Number, is_face >::get_normal_vector (const unsigned int q_point) const
      -
      Use normal_vector() instead.
      +
      Use normal_vector() instead.
      Member FEFaceEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate_scatter (const bool integrate_values, const bool integrate_gradients, VectorType &output_vector)
      -
      Please use the integrate_scatter() function with the EvaluationFlags argument.
      +
      Please use the integrate_scatter() function with the EvaluationFlags argument.
      Member FEInterfaceViews::Vector< dim, spacedim >::average_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
      Use the average_of_hessians() function instead.
      Member FEInterfaceViews::Vector< dim, spacedim >::jump_gradient (const unsigned int interface_dof_index, const unsigned int q_point) const
      @@ -157,7 +157,7 @@
      Member FEInterfaceViews::Vector< dim, spacedim >::jump_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
      Use the average_of_hessians() function instead.
      Member FEPointEvaluationBase< n_components_, dim, spacedim, Number >::real_point (const unsigned int point_index) const
      -
      Use the function quadrature_point() instead.
      +
      Use the function quadrature_point() instead.
      Member FETools::Compositing::compute_nonzero_components (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0, const bool do_tensor_product=true)
      Use the versions of this function that take a vector of elements or an initializer list as arguments.
      Member FETools::Compositing::compute_restriction_is_additive_flags (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0)
      @@ -169,9 +169,9 @@
      Member FiniteElement< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Mapping< dim, spacedim > &mapping, const Quadrature< dim - 1 > &quadrature, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
      Use the version taking a hp::QCollection argument.
      Member GridTools::fix_up_distorted_child_cells (const typename Triangulation< dim, spacedim >::DistortedCellList &distorted_cells, Triangulation< dim, spacedim > &triangulation)
      -
      This function predates deal.II's use of manifolds and use of cell-local transfinite interpolation to place new points and is no longer necessary. See Manifolds::get_default_points_and_weights() for more information.
      +
      This function predates deal.II's use of manifolds and use of cell-local transfinite interpolation to place new points and is no longer necessary. See Manifolds::get_default_points_and_weights() for more information.
      Member GridTools::rotate (const double angle, const unsigned int axis, Triangulation< dim, 3 > &triangulation)
      -
      Use the alternative with the unit vector instead.
      +
      Use the alternative with the unit vector instead.
      Member identity
      Use std_cxx20::type_identity instead.
      Member LinearAlgebra::CUDAWrappers::Vector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={})
      @@ -231,9 +231,9 @@
      Member parallel::fullydistributed::Triangulation< dim, spacedim >::load (const std::string &filename, const bool autopartition) override
      The autopartition parameter has been removed.
      Member ParameterHandler::ShortText
      -
      Use ShortPRM instead of ShortText.
      +
      Use ShortPRM instead of ShortText.
      Member ParameterHandler::Text
      -
      Use PRM instead of Text.
      +
      Use PRM instead of Text.
      Member Particles::ParticleAccessor< dim, spacedim >::set_property_pool (PropertyPool< dim, spacedim > &property_pool)
      This function is only kept for backward compatibility and has no meaning any more. ParticleAccessors always use the property pool of the owning particle handler.
      Member Particles::ParticleHandler< dim, spacedim >::register_load_callback_function (const bool serialization)
      @@ -241,7 +241,7 @@
      Member Particles::ParticleHandler< dim, spacedim >::register_store_callback_function ()
      Please use prepare_for_coarsening_and_refinement() or prepare_for_serialization() instead. See there for further information about the purpose of this function.
      Class PathSearch
      -
      Use the std::filesystem facilities instead.
      +
      Use the std::filesystem facilities instead.
      Member PETScWrappers::SolverBiCG::SolverBiCG (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
      Member PETScWrappers::SolverBicgstab::SolverBicgstab (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
      @@ -277,7 +277,7 @@
      Member Physics::Transformations::Rotations::rotation_matrix_3d (const Point< 3, Number > &axis, const Number &angle)
      Use the variant with a Tensor as an axis.
      Member PolarManifold< dim, spacedim >::center
      -
      Use get_center() instead.
      +
      Use get_center() instead.
      Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const unsigned int n_quadrature_points)
      Use the version of this function which takes a combined_orientation argument instead.
      Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const hp::QCollection< dim - 1 > &quadrature)
      @@ -285,7 +285,7 @@
      Member QProjector< dim >::DataSetDescriptor::subface (const ReferenceCell &reference_cell, const unsigned int face_no, const unsigned int subface_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const unsigned int n_quadrature_points, const internal::SubfaceCase< dim > ref_case=internal::SubfaceCase< dim >::case_isotropic)
      Use the version of this function which takes a combined_orientation argument instead.
      Member ReferenceCell::compute_orientation (const std::array< T, N > &vertices_0, const std::array< T, N > &vertices_1) const
      -
      Use get_combined_orientation() instead.
      +
      Use get_combined_orientation() instead.
      Member ReferenceCell::permute_according_orientation (const std::array< T, N > &vertices, const unsigned int orientation) const
      Use permute_by_combined_orientation() instead.
      Class SLEPcWrappers::TransformationSpectrumFolding
      @@ -295,7 +295,7 @@
      Member SparsityTools::distribute_sparsity_pattern (BlockDynamicSparsityPattern &dsp, const std::vector< IndexSet > &owned_set_per_cpu, const MPI_Comm mpi_comm, const IndexSet &myrange)
      Use the distribute_sparsity_pattern() with a single index set for the present MPI process only.
      Member SphericalManifold< dim, spacedim >::center
      -
      Use get_center() instead.
      +
      Use get_center() instead.
      Member SymmetricTensor< rank_, dim, Number >::begin_raw ()
      This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this so. As a consequence, this function is deprecated.
      Member SymmetricTensor< rank_, dim, Number >::begin_raw () const
      @@ -321,7 +321,7 @@
      Member Utilities::MPI::create_group (const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
      Use MPI_Comm_create_group directly
      Member Utilities::MPI::RemotePointEvaluation< dim, spacedim >::RemotePointEvaluation (const double tolerance, const bool enforce_unique_mapping=false, const unsigned int rtree_level=0, const std::function< std::vector< bool >()> &marked_vertices={})
      -
      +
      Member XDMFEntry::get_xdmf_content (const unsigned int indent_level, const ReferenceCell &reference_cell) const
      Use the other function instead.
      Member XDMFEntry::XDMFEntry (const std::string &filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
      /usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-12-27 18:25:14.848915241 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-12-27 18:25:14.848915241 +0000 @@ -198,7 +198,7 @@
      -

      One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

      +

      One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

      \[
   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
   \approx
@@ -332,7 +332,7 @@
   </tr>
 </table>
 </div><div class= -

      Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

      +

      Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

      \[
   \mathbf u \cdot \mathbf A \mathbf v =
   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html	2024-12-27 18:25:14.872915406 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html	2024-12-27 18:25:14.876915433 +0000
@@ -171,11 +171,11 @@
 <p><a class=Quadrature coupling options when assembling quadrature formulas for double integrals.

      When computing the approximation of double integrals of the form

      -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K(x_1, x_2) f(x_1) g(x_2) dT_1 dT_2,
-\] +\]" src="form_1087.png"/>

      -

      where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one needs to combine quadrature formulas from two different FEValuesBase objects.

      +

      where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one needs to combine quadrature formulas from two different FEValuesBase objects.

      This enum class provides a way to specify how the quadrature points and weights should be combined. In general, the two FEValuesBase objects provide different quadrature rules, and these can be interpreted in different ways, depending on the kernel function that is being integrated, and on how the two quadrature rules were constructed.

      This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the quadrature points and weights of the two FEValuesBase objects.

      @@ -217,11 +217,11 @@

      DoF coupling options when assembling double integrals.

      When computing the approximation of double integrals of the form

      -\[
+<picture><source srcset=\[
 \int_{T_1} \int{T_2} K(x_1, x_2) v_i(x_1) w_j(x_2) dT_1 dT_2,
-\] +\]" src="form_1090.png"/>

      -

      where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one may want to combine degrees from two different FEValuesBase objects (i.e., basis functions $v_i$ and $w_j$ in the examples above).

      +

      where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one may want to combine degrees from two different FEValuesBase objects (i.e., basis functions $v_i$ and $w_j$ in the examples above).

      This enum class provides a way to specify how the degrees of freedom should be combined. There are two cases of interest:

      1. the two FEValuesBase objects refer to different DoFHandlers
      2. @@ -230,14 +230,14 @@

        In the first case, one usually treats the two sets of degrees of freedom as independent of each other, and the resulting matrix is generally rectangular.

        In the second case, one may choose to treat the two sets of degrees of freedom either as independent or to group them together. A similar approach is used in the FEInterfaceValues class, where the degrees of freedom of the two FEValuesBase objects are grouped together, in a contiguous way, so that the resulting basis functions are interpreted in the following way:

        -\[
+<picture><source srcset=\[
 \phi_{1,i}(x) = \begin{cases} v_i(x) & \text{ if } i \in [0,n_l) \\
 0 & \text{ if } i \in [n_1, n_1+n_2] \end{cases},\quad \phi_{1,i}(x) =
 \begin{cases} 0(x) & \text{ if } i \in [0,n_1) \\
 w_{i-n_1}(x) & \text{ if } i \in [n_1, n_1+n_2] \end{cases},
-\] +\]" src="form_1093.png"/>

        -

        where $\phi_{1,i}$ is the first basis function with index $i$ and $n_{1,2}$ are the number of local dofs on the first and second FEValuesBase objects.

        +

        where $\phi_{1,i}$ is the first basis function with index $i$ and $n_{1,2}$ are the number of local dofs on the first and second FEValuesBase objects.

        This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the local dof indices of the two FEValuesBase objects.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-12-27 18:25:15.444919333 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-12-27 18:25:15.448919360 +0000 @@ -137,7 +137,7 @@ w_q, \]" src="form_272.png"/>

      - where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight. + where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight.

      In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

      On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-12-27 18:25:15.476919552 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-12-27 18:25:15.480919580 +0000 @@ -116,7 +116,7 @@
      Enumerator
      independent&#href_anchor"fielddoc">

      The FEValuesBase objects may have different dof indices, possibly indexing different DoFHandler objects, and we are interested in assembling a generally rectangular matrix, where there is no relationship between the two index spaces.

      /usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-12-27 18:25:14.952915955 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-12-27 18:25:14.960916010 +0000 @@ -949,7 +949,7 @@
      1085 const std::vector<
      -
      1086 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      +
      1086 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      1087 &non_matching_faces_marked_vertices,
      1088 const unsigned int quad_no,
      1089 const unsigned int dof_no,
      @@ -1095,7 +1095,7 @@
      1229 const std::vector<
      -
      1230 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      +
      1230 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      1231 &non_matching_faces_marked_vertices,
      1232 const unsigned int n_q_pnts_1D,
      1233 const unsigned int dof_no,
      @@ -1495,7 +1495,6 @@
      static const unsigned int invalid_unsigned_int
      Definition types.h:220
      -
      unsigned int boundary_id
      Definition types.h:144
      std::vector< BoundingBox< boost::geometry::dimension< typename Rtree::indexable_type >::value > > extract_rtree_level(const Rtree &tree, const unsigned int level)
      RTree< typename LeafTypeIterator::value_type, IndexType, IndexableGetter > pack_rtree(const LeafTypeIterator &begin, const LeafTypeIterator &end)
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-12-27 18:25:15.004916312 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-12-27 18:25:15.008916339 +0000 @@ -185,7 +185,7 @@

      The macro DEAL_II_CONSTEXPR expands to constexpr if the compiler supports enough constexpr features (such as loops). If the compiler does not then this macro expands to nothing.

      Functions declared as constexpr can be evaluated at compile time. Hence code like

      constexpr double det_A = determinant(A);
      DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
      -

      assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.

      +

      assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.

      Function Documentation

      ◆ new_task()

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-12-27 18:25:15.028916476 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-12-27 18:25:15.032916504 +0000 @@ -185,7 +185,7 @@
      template <typename VectorType>
      virtual void Tstep(VectorType &u, const VectorType &v) const =0;
      };
      -

      where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.

      +

      where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.

      SparsityPatternType
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-12-27 18:25:15.408919086 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-12-27 18:25:15.416919140 +0000 @@ -337,7 +337,7 @@
      std::function<void(Domain &, const Range &)> Tvmult;
      std::function<void(Domain &, const Range &)> Tvmult_add;

      Thus, such an object can be used as a matrix object in all iterative solver classes, either as a matrix object, or as preconditioner.

      -

      The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs above computation when applied on a vector, one can write:

      #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
      +

      The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs above computation when applied on a vector, one can write:

      #href_anchor"code" href="linear__operator__tools_8h.html">deal.II/lac/linear_operator_tools.h>
      double k;
      @@ -1387,7 +1387,7 @@
      const LinearOperator< Range, Domain, Payload > & op&#href_anchor"memdoc"> -

      Create a PackagedOperation object from a LinearOperator and a reference to a vector u of the Range space. The object stores the PackagedOperation $\text{op}^T \,u$ (in matrix notation). return (return_add) are implemented with Tvmult(__1,u) (Tvmult_add(__1,u)).

      +

      Create a PackagedOperation object from a LinearOperator and a reference to a vector u of the Range space. The object stores the PackagedOperation $\text{op}^T \,u$ (in matrix notation). return (return_add) are implemented with Tvmult(__1,u) (Tvmult_add(__1,u)).

      The PackagedOperation object that is created stores a reference to u. Thus, the vector must remain a valid reference for the whole lifetime of the PackagedOperation object. All changes made on u after the creation of the PackagedOperation object are reflected by the operator object.

      Definition at line 703 of file packaged_operation.h.

      @@ -1411,7 +1411,7 @@
      const PackagedOperation< Domain > & comp&#href_anchor"memdoc"> -

      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op} \,comp$ (in matrix notation).

      +

      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op} \,comp$ (in matrix notation).

      Definition at line 730 of file packaged_operation.h.

      @@ -1434,7 +1434,7 @@
      const LinearOperator< Range, Domain, Payload > & op&#href_anchor"memdoc"> -

      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op}^T \,comp$ (in matrix notation).

      +

      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op}^T \,comp$ (in matrix notation).

      Definition at line 774 of file packaged_operation.h.

      @@ -1470,7 +1470,7 @@

      Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

      We construct the definition of the Schur complement in the following way:

      Consider a general system of linear equations that can be decomposed into two major sets of equations:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
@@ -1483,60 +1483,60 @@
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_1940.png"/>

      -

      where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

      +

      where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

      This is equivalent to the following two statements:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1945.png"/>

      -

      Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

      -\begin{eqnarray*}
+<p>Assuming that <picture><source srcset=$ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

      +\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1947.png"/>

      which amount to performing block Gaussian elimination on this system of equations.

      For the purpose of the current implementation, we choose to substitute (3) into (2)

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
-\end{eqnarray*} +\end{eqnarray*}" src="form_1948.png"/>

      This leads to the result

      -\[
+<picture><source srcset=\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
-\] +\]" src="form_1949.png"/>

      -

      with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

      -

      So for any arbitrary vector $ a $, the Schur complement performs the following operation:

      -\[
+<p> with <picture><source srcset=$ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

      +

      So for any arbitrary vector $ a $, the Schur complement performs the following operation:

      +\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
-\] +\]" src="form_1956.png"/>

      A typical set of steps needed the solve a linear system (1),(2) would be:

      1. Define the inverse matrix A_inv (using inverse_operator()).
      2. -
      3. Define the Schur complement $ S $ (using schur_complement()).
      4. -
      5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
      6. +
      7. Define the Schur complement $ S $ (using schur_complement()).
      8. +
      9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
      10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

        -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1958.png"/>

      11. -
      12. Solve for $ y $ in (5):

        -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

        +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1960.png"/>

      13. Perform the post-processing step from (3) using postprocess_schur_solution():

        -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1961.png"/>

      @@ -1582,10 +1582,10 @@
      LinearOperator< Domain, Range, Payload > inverse_operator(const LinearOperator< Range, Domain, Payload > &op, Solver &solver, const Preconditioner &preconditioner)
      PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
      -

      In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

      -

      A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
-$ is derived from the mass matrix over this space.

      -

      From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

      // Construction of approximate inverse of Schur complement
      +

      In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

      +

      A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1}
+$ is derived from the mass matrix over this space.

      +

      From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

      // Construction of approximate inverse of Schur complement
      const auto A_inv_approx = linear_operator(preconditioner_A);
      const auto S_approx = schur_complement(A_inv_approx,B,C,D);
      @@ -1608,8 +1608,8 @@
      // Solve for y
      y = S_inv * rhs;
      x = postprocess_schur_solution (A_inv,B,y,f);
      -

      Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
-\text{prec}(D) $, should ideally be computationally inexpensive.

      +

      Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of a IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $
+\text{prec}(D) $, should ideally be computationally inexpensive.

      However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

      Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

      See also
      Block (linear algebra)
      @@ -1646,15 +1646,15 @@
      const Range_2 & g&#href_anchor"memdoc">

      For the system of equations

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1967.png"/>

      this operation performs the pre-processing (condensation) step on the RHS subvector g so that the Schur complement can be used to solve this system of equations. More specifically, it produces an object that represents the condensed form of the subvector g, namely

      -\[
+<picture><source srcset=\[
   g' = g - C \: A^{-1} \: f
-\] +\]" src="form_1968.png"/>

      See also
      Block (linear algebra)
      @@ -1690,15 +1690,15 @@
      const Range_1 & f&#href_anchor"memdoc">

      For the system of equations

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
-\end{eqnarray*} +\end{eqnarray*}" src="form_1967.png"/>

      this operation performs the post-processing step of the Schur complement to solve for the second subvector x once subvector y is known, with the result that

      -\[
+<picture><source srcset=\[
   x =  A^{-1}(f - By)
-\] +\]" src="form_1969.png"/>

      See also
      Block (linear algebra)
      @@ -2965,7 +2965,7 @@
      &#href_anchor"memitem:">namespace  Differentiation::SD
      &#href_anchor"details" id="details">

      Detailed Description

      A group dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

      -

      Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

      +

      Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples are situations in the finite element context is where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. 
In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

      Automatic differentiation

      Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

      @@ -164,38 +164,38 @@
    • reverse-mode (or reverse accumulation) auto-differentiation.
    • As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

      -

      With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

      -
      Forward mode automatic differentiation
      Forward mode automatic differentiation
      Reverse mode automatic differentiation
      Reverse mode automatic differentiation

      Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

      -

      Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

      -\[
+<p>With the aid of the diagram below (it and some of the listed details courtesy of this <a href=Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

      +
      Forward mode automatic differentiation
      Forward mode automatic differentiation
      Reverse mode automatic differentiation
      Reverse mode automatic differentiation

      Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

      +

      Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

      +\[
   f (\mathbf{x})
   = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
   \quad .
-\] +\]" src="form_10.png"/>

      -

      As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

      -\[
+<p> As was previously mentioned, if each of the primitive operations <picture><source srcset=$f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

      +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
   \quad .
-\] +\]" src="form_14.png"/>

      -

      In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

      -\[
+<p>In forward-mode, the chain-rule is computed naturally from the $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

      +\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
   \quad .
-\] +\]" src="form_16.png"/>

      The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.

      In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

      -\[
+<picture><source srcset=\[
 \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
   \quad .
-\] +\]" src="form_17.png"/>

      -

      The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

      +

      The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

      Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.

      Supported automatic differentiation libraries

      @@ -343,7 +343,7 @@

      Symbolic expressions and differentiation

      Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

      -

      To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

      +

      To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose that the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. 
For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).

      Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

      The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expression through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

      As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-12-27 18:25:15.556920102 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-12-27 18:25:15.560920129 +0000 @@ -216,7 +216,7 @@
    • If you have boundary conditions that set a certain part of the solution's value, for example no normal flux, $\mathbf n \cdot
   \mathbf u=0$ (as happens in flow problems and is handled by the VectorTools::compute_no_normal_flux_constraints function) or prescribed tangential components, $\mathbf{n}\times\mathbf{u}=
   \mathbf{n}\times\mathbf{f}$ (as happens in electromagnetic problems and is handled by the VectorTools::project_boundary_values_curl_conforming function). For the former case, imagine for example that we are at at vertex where the normal vector has the form $\frac 1{\sqrt{14}}
-  (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
    • + (1,2,3)^T$" src="form_43.png"/> and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
    • If you have hanging node constraints, for example in a mesh like this:
      @@ -309,7 +309,7 @@ \]" src="form_70.png"/>

      instead (see, for example, [Shephard1984]).

      -

      Here, $A$ is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. $b$ is the corresponding right hand side of a system of linear equations $A\,x=b$. The matrix $C$ describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector $k$ is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector $x$ is the operation

      +

      Here, $A$ is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. $b$ is the corresponding right hand side of a system of linear equations $A\,x=b$. The matrix $C$ describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector $k$ is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector $x$ is the operation

      \[
  x \leftarrow C\,x+k.
 \] @@ -376,7 +376,7 @@

    • Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

      -

      This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry $A_{ij}$ that is computed from two basis functions $\varphi_i$ and $\varphi_j$ with (global) indices $i$ and $j$ (for example, using a bilinear form $A_{ij}=a(\varphi_i,\varphi_j)$) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

      +

      This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry $A_{ij}$ that is computed from two basis functions $\varphi_i$ and $\varphi_j$ with (global) indices $i$ and $j$ (for example, using a bilinear form $A_{ij}=a(\varphi_i,\varphi_j)$) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

      This algorithm makes no distinction between shape functions on each cell, i.e., it simply couples all degrees of freedom on a cell with all other degrees of freedom on a cell. This is often the case, and always a safe assumption. However, if you know something about the structure of your operator and that it does not couple certain shape functions with certain test functions, then you can get a sparser sparsity pattern by calling a variant of the current function described below that allows to specify which vector components couple with which other vector components.

      The method described above lives on the assumption that coupling between degrees of freedom only happens if shape functions overlap on at least one cell. This is the case with most usual finite element formulations involving conforming elements. However, for formulations such as the Discontinuous Galerkin finite element method, the bilinear form contains terms on interfaces between cells that couple shape functions that live on one cell with shape functions that live on a neighboring cell. The current function would not see these couplings, and would consequently not allocate entries in the sparsity pattern. You would then get into trouble during matrix assembly because you try to write into matrix entries for which no space has been allocated in the sparsity pattern. This can be avoided by calling the DoFTools::make_flux_sparsity_pattern() function instead, which takes into account coupling between degrees of freedom on neighboring cells.

      There are other situations where bilinear forms contain non-local terms, for example in treating integral equations. These require different methods for building the sparsity patterns that depend on the exact formulation of the problem. You will have to do this yourself then.

      @@ -446,7 +446,7 @@ -\Delta \mathbf u + \nabla p &= 0,\\ \text{div}\ u &= 0 \end{align*}" src="form_1013.png"/>

      -

      in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only $u_x$ and $p$ appear; in the second equation, only $u_y$ and $p$ appear; and in the third equation, only $u_x$ and $u_y$ appear. (Note that this discussion only talks about vector components of the solution variable and the different equation, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

      +

      in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only $u_x$ and $p$ appear; in the second equation, only $u_y$ and $p$ appear; and in the third equation, only $u_x$ and $u_y$ appear. (Note that this discussion only talks about vector components of the solution variable and the different equation, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

      \[
 \left[
@@ -763,9 +763,9 @@
 <div class=

      LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)

      and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

      This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

      -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1698.png"/>

      with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

      A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

      @@ -801,9 +801,9 @@

      with

      This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

      -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] +\]" src="form_1698.png"/>

      with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

      A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

      @@ -1161,27 +1161,27 @@

      This function is an updated version of the project_boundary_values_curl_conforming function. The intention is to fix a problem when using the previous function in conjunction with non-rectangular geometries (i.e. elements with non-rectangular faces). The L2-projection method used has been taken from the paper "Electromagnetic scattering simulation using an H (curl) conforming hp-finite element method in three dimensions" by PD Ledger, K Morgan and O Hassan ( Int. J. Num. Meth. Fluids, Volume 53, Issue 8, pages 1267-1296).

      -

      This function will compute constraints that correspond to Dirichlet boundary conditions of the form $\vec{n}\times\vec{E}=\vec{n}\times\vec{F}$ i.e. the tangential components of $\vec{E}$ and $f$ shall coincide.

      +

      This function will compute constraints that correspond to Dirichlet boundary conditions of the form $\vec{n}\times\vec{E}=\vec{n}\times\vec{F}$ i.e. the tangential components of $\vec{E}$ and $f$ shall coincide.

      Computing constraints

      To compute the constraints we use a projection method based upon the paper mentioned above. In 2d this is done in a single stage for the edge-based shape functions, regardless of the order of the finite element. In 3d this is done in two stages, edges first and then faces.

      -

      For each cell, each edge, $e$, is projected by solving the linear system $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the edge and

      -

      $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$

      -

      $b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS$

      -

      with $\vec{s}_{i}$ the $i^{th}$ shape function and $\vec{t}$ the tangent vector.

      -

      Once all edge constraints, $x$, have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

      -

      For each face on the cell, $f$, we solve the linear system $By=c$ where $y$ is the vector of constraints on degrees of freedom on the face and

      -

      $B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times
-\vec{s}_{j}) dS$

      -

      $c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times
-\vec{s}_i) dS$

      -

      and $\vec{r} = \vec{F} - \sum_{e \in f} \sum{i \in e} x_{i}\vec{s}_i$, the edge residual.

      -

      The resulting constraints are then given in the solutions $x$ and $y$.

      +

      For each cell, each edge, $e$, is projected by solving the linear system $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the edge and

      +

      $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$

      +

      $b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS$

      +

      with $\vec{s}_{i}$ the $i^{th}$ shape function and $\vec{t}$ the tangent vector.

      +

      Once all edge constraints, $x$, have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

      +

      For each face on the cell, $f$, we solve the linear system $By=c$ where $y$ is the vector of constraints on degrees of freedom on the face and

      +

      $B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times
+\vec{s}_{j}) dS$

      +

      $c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times
+\vec{s}_i) dS$

      +

      and $\vec{r} = \vec{F} - \sum_{e \in f} \sum{i \in e} x_{i}\vec{s}_i$, the edge residual.

      +

      The resulting constraints are then given in the solutions $x$ and $y$.

      If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

      Arguments to this function

      This function is explicitly for use with FE_Nedelec elements, or with FESystem elements which contain FE_Nedelec elements. It will throw an exception if called with any other finite element. The user must ensure that FESystem elements are correctly set up when using this function, as this check is not possible in this case.

      -

      The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3d and have components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary conditions $\vec{n}\times\vec{B}=\vec{n}\times\vec{f}$, then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to $\vec{E}$ and the second 3 corresponding to $\vec{B}$. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component.

      +

      The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3d and have components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary conditions $\vec{n}\times\vec{B}=\vec{n}\times\vec{f}$, then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to $\vec{E}$ and the second 3 corresponding to $\vec{B}$. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component.

      The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

      -

      The last argument is used to compute the normal vector $\vec{n}$ at the boundary points.

      +

      The last argument is used to compute the normal vector $\vec{n}$ at the boundary points.

      See also
      Glossary entry on boundary indicators
      @@ -1264,11 +1264,11 @@ const Mapping< dim > & mapping&#href_anchor"memdoc"> -

      Compute constraints that correspond to boundary conditions of the form $\vec{n}^T\vec{u}=\vec{n}^T\vec{f}$, i.e. the normal components of the solution $u$ and a given $f$ shall coincide. The function $f$ is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

      +

      Compute constraints that correspond to boundary conditions of the form $\vec{n}^T\vec{u}=\vec{n}^T\vec{f}$, i.e. the normal components of the solution $u$ and a given $f$ shall coincide. The function $f$ is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

      This function is explicitly written to use with the FE_RaviartThomas elements. Thus it throws an exception, if it is called with other finite elements.

      If the AffineConstraints object constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

      -

      The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function $\vec{u}$ that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., $x$-, $y$-, and finally $z$-component.

      -

      The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector $\vec{n}$ at the boundary points.

      +

      The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function $\vec{u}$ that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., $x$-, $y$-, and finally $z$-component.

      +

      The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector $\vec{n}$ at the boundary points.

      Computing constraints

      To compute the constraints we use the interpolation operator proposed in Brezzi, Fortin (Mixed and Hybrid Finite Element Methods, Springer, 1991) on every face located at the boundary.

      See also
      Glossary entry on boundary indicators
      @@ -1359,16 +1359,16 @@ const bool use_manifold_for_normal = true&#href_anchor"memdoc"> -

      This function computes the constraints that correspond to boundary conditions of the form $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$, i.e., normal flux constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose normal component we want to be equal to the normal component of the solution. These conditions have exactly the form handled by the AffineConstraints class, in that they relate a linear combination of boundary degrees of freedom to a corresponding value (the inhomogeneity of the constraint). Consequently, the current function creates a list of constraints that are written into an AffineConstraints container. This object may already have some content, for example from hanging node constraints, that remains untouched. These constraints have to be applied to the linear system like any other such constraints, i.e., you have to condense the linear system with the constraints before solving, and you have to distribute the solution vector afterwards.

      -

      This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where $\vec u_\Gamma \cdot \vec n = 0$, and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

      +

      This function computes the constraints that correspond to boundary conditions of the form $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$, i.e., normal flux constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose normal component we want to be equal to the normal component of the solution. These conditions have exactly the form handled by the AffineConstraints class, in that they relate a linear combination of boundary degrees of freedom to a corresponding value (the inhomogeneity of the constraint). Consequently, the current function creates a list of constraints that are written into an AffineConstraints container. This object may already have some content, for example from hanging node constraints, that remains untouched. These constraints have to be applied to the linear system like any other such constraints, i.e., you have to condense the linear system with the constraints before solving, and you have to distribute the solution vector afterwards.

      +

      This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where $\vec u_\Gamma \cdot \vec n = 0$, and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

      Note
      This function doesn't make much sense in 1d, so it throws an exception if dim equals one.

      Arguments to this function

      -

      The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components $(u,v,p)$, then first_vector_component needs to be zero if you intend to constrain the vector $(u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n$. On the other hand, if we solved the Maxwell equations in 3d and the finite element has components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary condition $\vec
-B\cdot \vec n=\vec B_\Gamma\cdot \vec n$, then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a $Q_2$ element, whereas all the other ones are $Q_1$ elements) as there would be points on the boundary where the $x$-velocity is defined but no corresponding $y$- or $z$-velocities.

      +

      The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components $(u,v,p)$, then first_vector_component needs to be zero if you intend to constrain the vector $(u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n$. On the other hand, if we solved the Maxwell equations in 3d and the finite element has components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary condition $\vec
+B\cdot \vec n=\vec B_\Gamma\cdot \vec n$, then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a $Q_2$ element, whereas all the other ones are $Q_1$ elements) as there would be points on the boundary where the $x$-velocity is defined but no corresponding $y$- or $z$-velocities.

      The third argument denotes the set of boundary indicators on which the boundary condition is to be enforced. Note that, as explained below, this is one of the few functions where it makes a difference where we call the function multiple times with only one boundary indicator, or whether we call the function once with the whole set of boundary indicators at once.

      -

      Argument four (function_map) describes the boundary function $\vec
-u_\Gamma$ for each boundary id. The function function_map[id] is used on boundary with id id taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independently of first_vector_component.

      -

      The mapping argument is used to compute the location of points on the boundary at which the function needs to request the normal vector $\vec n$ from the Manifold description if use_manifold_for_normal is set. If this parameter is not set, the mapping is used for computing the normal. This is useful, e.g., in the case that the mapping describes a deformation (e.g., MappingQCache, MappingQEulerian, MappingFEField).

      +

      Argument four (function_map) describes the boundary function $\vec
+u_\Gamma$ for each boundary id. The function function_map[id] is used on boundary with id id taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independently of first_vector_component.

      +

      The mapping argument is used to compute the location of points on the boundary at which the function needs to request the normal vector $\vec n$ from the Manifold description if use_manifold_for_normal is set. If this parameter is not set, the mapping is used for computing the normal. This is useful, e.g., in the case that the mapping describes a deformation (e.g., MappingQCache, MappingQEulerian, MappingFEField).

      Note
      When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

      Computing constraints in 2d

      Computing these constraints requires some smarts. The main question revolves around the question what the normal vector is. Consider the following situation:

      @@ -1376,23 +1376,23 @@
      -

      Here, we have two cells that use a bilinear mapping (i.e., MappingQ(1)). Consequently, for each of the cells, the normal vector is perpendicular to the straight edge. If the two edges at the top and right are meant to approximate a curved boundary (as indicated by the dashed line), then neither of the two computed normal vectors are equal to the exact normal vector (though they approximate it as the mesh is refined further). What is worse, if we constrain $\vec u \cdot \vec n=
-\vec u_\Gamma \cdot \vec n$ at the common vertex with the normal vector from both cells, then we constrain the vector $\vec u$ with respect to two linearly independent vectors; consequently, the constraint would be $\vec u=\vec u_\Gamma$ at this point (i.e. all components of the vector), which is not what we wanted.

      -

      To deal with this situation, the algorithm works in the following way: at each point where we want to constrain $\vec u$, we first collect all normal vectors that adjacent cells might compute at this point. We then do not constrain $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$ for each of these normal vectors but only for the average of the normal vectors. In the example above, we therefore record only a single constraint $\vec u \cdot \vec {\bar n}=\vec u_\Gamma \cdot \vec
-{\bar n}$, where $\vec {\bar n}$ is the average of the two indicated normal vectors.

      +

      Here, we have two cells that use a bilinear mapping (i.e., MappingQ(1)). Consequently, for each of the cells, the normal vector is perpendicular to the straight edge. If the two edges at the top and right are meant to approximate a curved boundary (as indicated by the dashed line), then neither of the two computed normal vectors are equal to the exact normal vector (though they approximate it as the mesh is refined further). What is worse, if we constrain $\vec u \cdot \vec n=
+\vec u_\Gamma \cdot \vec n$ at the common vertex with the normal vector from both cells, then we constrain the vector $\vec u$ with respect to two linearly independent vectors; consequently, the constraint would be $\vec u=\vec u_\Gamma$ at this point (i.e. all components of the vector), which is not what we wanted.

      +

      To deal with this situation, the algorithm works in the following way: at each point where we want to constrain $\vec u$, we first collect all normal vectors that adjacent cells might compute at this point. We then do not constrain $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$ for each of these normal vectors but only for the average of the normal vectors. In the example above, we therefore record only a single constraint $\vec u \cdot \vec {\bar n}=\vec u_\Gamma \cdot \vec
+{\bar n}$, where $\vec {\bar n}$ is the average of the two indicated normal vectors.

      Unfortunately, this is not quite enough. Consider the situation here:

      -

      If again the top and right edges approximate a curved boundary, and the left boundary a separate boundary (for example straight) so that the exact boundary has indeed a corner at the top left vertex, then the above construction would not work: here, we indeed want to constrain $\vec u$ entirely at this point (because the normal velocities with respect to both the left normal as well as the top normal vector should be zero), not that the velocity in the direction of the average normal vector is zero.

      +

      If again the top and right edges approximate a curved boundary, and the left boundary a separate boundary (for example straight) so that the exact boundary has indeed a corner at the top left vertex, then the above construction would not work: here, we indeed want to constrain $\vec u$ entirely at this point (because the normal velocities with respect to both the left normal as well as the top normal vector should be zero), not that the velocity in the direction of the average normal vector is zero.

      Consequently, we use the following heuristic to determine whether all normal vectors computed at one point are to be averaged: if two normal vectors for the same point are computed on different cells, then they are to be averaged. This covers the first example above. If they are computed from the same cell, then the fact that they are different is considered indication that they come from different parts of the boundary that might be joined by a real corner, and must not be averaged.

      There is one problem with this scheme. If, for example, the same domain we have considered above, is discretized with the following mesh, then we get into trouble:

      -

      Here, the algorithm assumes that the boundary does not have a corner at the point where faces $F1$ and $F2$ join because at that point there are two different normal vectors computed from different cells. If you intend for there to be a corner of the exact boundary at this point, the only way to deal with this is to assign the two parts of the boundary different boundary indicators and call this function twice, once for each boundary indicators; doing so will yield only one normal vector at this point per invocation (because we consider only one boundary part at a time), with the result that the normal vectors will not be averaged. This situation also needs to be taken into account when using this function around reentrant corners on Cartesian meshes. If normal-flux boundary conditions are to be enforced on non-Cartesian meshes around reentrant corners, one may even get cycles in the constraints as one will in general constrain different components from the two sides. In that case, set a no-slip constraint on the reentrant vertex first.

      +

      Here, the algorithm assumes that the boundary does not have a corner at the point where faces $F1$ and $F2$ join because at that point there are two different normal vectors computed from different cells. If you intend for there to be a corner of the exact boundary at this point, the only way to deal with this is to assign the two parts of the boundary different boundary indicators and call this function twice, once for each boundary indicators; doing so will yield only one normal vector at this point per invocation (because we consider only one boundary part at a time), with the result that the normal vectors will not be averaged. This situation also needs to be taken into account when using this function around reentrant corners on Cartesian meshes. If normal-flux boundary conditions are to be enforced on non-Cartesian meshes around reentrant corners, one may even get cycles in the constraints as one will in general constrain different components from the two sides. In that case, set a no-slip constraint on the reentrant vertex first.

      Computing constraints in 3d

      The situation is more complicated in 3d. Consider the following case where we want to compute the constraints at the marked vertex:

      @@ -1512,7 +1512,7 @@ const bool use_manifold_for_normal = true&#href_anchor"memdoc"> -

      This function does the same as the compute_nonzero_normal_flux_constraints() function (see there for more information), but for the simpler case of homogeneous normal-flux constraints, i.e., for imposing the condition $\vec u \cdot \vec n= 0$. This function is used in step-31 and step-32.

      +

      This function does the same as the compute_nonzero_normal_flux_constraints() function (see there for more information), but for the simpler case of homogeneous normal-flux constraints, i.e., for imposing the condition $\vec u \cdot \vec n= 0$. This function is used in step-31 and step-32.

      To compute no normal-flux constraints for a specific multigrid level for the geometric multigrid method, see compute_no_normal_flux_constraints_on_level().

      See also
      Glossary entry on boundary indicators
      @@ -1613,7 +1613,7 @@ const bool use_manifold_for_normal = true&#href_anchor"memdoc"> -

      Compute the constraints that correspond to boundary conditions of the form $\vec u \times \vec n=\vec u_\Gamma \times \vec n$, i.e., tangential flow constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose tangential component(s) we want to be equal to the tangential component(s) of the solution. This function constrains exactly those dim-1 vector-valued components that are left unconstrained by VectorTools::compute_no_normal_flux_constraints(), and leaves the one component unconstrained that is constrained by that function.

      +

      Compute the constraints that correspond to boundary conditions of the form $\vec u \times \vec n=\vec u_\Gamma \times \vec n$, i.e., tangential flow constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose tangential component(s) we want to be equal to the tangential component(s) of the solution. This function constrains exactly those dim-1 vector-valued components that are left unconstrained by VectorTools::compute_no_normal_flux_constraints(), and leaves the one component unconstrained that is constrained by that function.

      Further reading

      A description of some of the techniques used in this function, along with a discussion of difficulties encountered with this kind of boundary conditions can be found in [Engelman1982] .

      See also
      Glossary entry on boundary indicators
      @@ -1756,9 +1756,9 @@
      Id_c = project_to_constrained_linear_operator(constraints, linop);

      and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

      This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

      -\[
+<picture><source srcset=\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
-\] /usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-12-27 18:25:15.592920349 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-12-27 18:25:15.596920376 +0000 @@ -130,7 +130,7 @@ * fe_collection.push_back (FE_Q<dim>(degree)); *

      This way, one can add elements of polynomial degree 1 through 4 to the collection. It is not necessary to retain the added object: the collection makes a copy of it, it does not only store a pointer to the given finite element object. This same observation also holds for the other collection classes.

      It is customary that within an hp-finite element program, one keeps collections of finite elements and quadrature formulas with the same number of elements, each element of the one collection matching the element in the other. This is not necessary, but it often makes coding a lot simpler. If a collection of mappings is used, the same holds for hp::MappingCollection objects as well.

      -

      Whenever p-adaptivity is considered in an hp-finite element program, a hierarchy of finite elements needs to be established to determine succeeding finite elements for refinement and preceding ones for coarsening. Typically, this hierarchy considers how finite element spaces are nested: for example, a $Q_1$ element describes a sub-space of a $Q_2$ element, and so doing $p$ refinement usually means using a larger (more accurate) finite element space. In other words, the hierarchy of finite elements is built by considering whether some elements of the collection are sub- or super-spaces of others.

      +

      Whenever p-adaptivity is considered in an hp-finite element program, a hierarchy of finite elements needs to be established to determine succeeding finite elements for refinement and preceding ones for coarsening. Typically, this hierarchy considers how finite element spaces are nested: for example, a $Q_1$ element describes a sub-space of a $Q_2$ element, and so doing $p$ refinement usually means using a larger (more accurate) finite element space. In other words, the hierarchy of finite elements is built by considering whether some elements of the collection are sub- or super-spaces of others.

      By default, we assume that finite elements are stored in an ascending order based on their polynomial degree. If the order of elements differs, a corresponding hierarchy needs to be supplied to the collection via the hp::FECollection::set_hierarchy() member function.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-12-27 18:25:15.616920514 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-12-27 18:25:15.620920541 +0000 @@ -179,7 +179,7 @@
      -

      A class that implements a polynomial mapping $Q_p$ of degree $p$ on all cells. This class is completely equivalent to the MappingQ class and is there for backward compatibility.

      +

      A class that implements a polynomial mapping $Q_p$ of degree $p$ on all cells. This class is completely equivalent to the MappingQ class and is there for backward compatibility.

      Definition at line 692 of file mapping_q.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-12-27 18:25:15.660920816 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-12-27 18:25:15.660920816 +0000 @@ -260,7 +260,7 @@

      From the examples above, it is obvious that if we encounter a cell that cannot be added to the cells which have already been entered, we cannot usually point to a cell that is the culprit and that must be entered in a different orientation. Furthermore, even if we knew which cell, there might be a large number of cells that would then cease to fit into the grid and for which we would have to find a different orientation as well (in the second example above, if we rotated cell 1, then we would have to rotate the cells 1 through N-1 as well).

      A brute force approach to this problem is the following: if cell N can't be added, then try to rotate cell N-1. If we can't rotate cell N-1 any more, then try to rotate cell N-2 and try to add cell N with all orientations of cell N-1. And so on. Algorithmically, we can visualize this by a tree structure, where node N has as many children as there are possible orientations of node N+1 (in two space dimensions, there are four orientations in which each cell can be constructed from its four vertices; for example, if the vertex indices are {0 1 3 2}, then the four possibilities would be {0, 1, 3, 2}, {1, 3, 2, 0}, {3, 2, 0, 1}, and {2, 0, 1, 3}. When adding one cell after the other, we traverse this tree in a depth-first (pre-order) fashion. When we encounter that one path from the root (cell 0) to a leaf (the last cell) is not allowed (i.e. that the orientations of the cells which are encoded in the path through the tree does not lead to a valid triangulation), we have to track back and try another path through the tree.

      In practice, of course, we do not follow each path to a final node and then find out whether a path leads to a valid triangulation, but rather use an inductive argument: if for all previously added cells the triangulation is a valid one, then we can find out whether a path through the tree can yield a valid triangulation by checking whether entering the present cell would introduce any faces that have a nonunique direction; if that is so, then we can stop following all paths below this point and track back immediately.

      -

      Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

      +

      Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

      In fact, the two examples above show that the exponential estimate is not a pessimistic one: we indeed have to track back to one of the very first cells there to find a way to add all cells in a consistent fashion.

      This discouraging situation is greatly improved by the fact that we have an alternative algorithm for 2d that is always linear in runtime (discovered and implemented by Michael Anderson of TICAM, University of Texas, in 2003), and that for 3d we can find an algorithm that in practice is usually only roughly linear in time and memory. We will describe these algorithms in the following. A full description and theoretical analysis is given in [AABB17] .

      The 2d linear complexity algorithm

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-12-27 18:25:15.700921091 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-12-27 18:25:15.708921146 +0000 @@ -310,7 +310,7 @@
      },
      1000);
      void transform(const InputIterator &begin_in, const InputIterator &end_in, OutputIterator out, const Function &function, const unsigned int grainsize)
      Definition parallel.h:165
      -

      In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we needed when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler and when it expands the loop that results from parallel::transform will be as if we had written the loop in its obvious form:

      InputIterator1 in_1 = x.begin();
      +

      In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we needed when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler and when it expands the loop that results from parallel::transform will be as if we had written the loop in its obvious form:

      InputIterator1 in_1 = x.begin();
      InputIterator2 in_2 = y.begin();
      OutputIterator out = z.begin();
      @@ -403,7 +403,7 @@
      }
      void apply_to_subranges(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, const Function &f, const unsigned int grainsize)
      Definition parallel.h:452

      Here, we call the vmult_on_subrange function on sub-ranges of at least 200 elements each, so that the initial setup cost can amortize.

      -

      A related operation is when the loops over elements each produce a result that must then be accumulated (other reduction operations than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

      double SparseMatrix::mat_norm (const Vector &x) const
      +

      A related operation is when the loops over elements each produce a result that must then be accumulated (other reduction operations than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

      double SparseMatrix::mat_norm (const Vector &x) const
      {
      const double *val_ptr = &values[0];
      const unsigned int *colnum_ptr = &colnums[0];
      @@ -606,7 +606,7 @@

    • -

      The last issue that is worth addressing is that the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

      +

      The last issue that is worth addressing is that the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

      The way to avoid this is to put the FEValues object into a second structure that will hold scratch data, and initialize it in the constructor:

      struct PerTaskData {
      FullMatrix<double> cell_matrix;
      Vector<double> cell_rhs;
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-12-27 18:25:15.748921420 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-12-27 18:25:15.756921475 +0000 @@ -294,8 +294,8 @@ \right) \end{eqnarray*}" src="form_302.png"/>

      -

      indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

      -

      Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

      +

      indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

      +

      Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

      \begin{eqnarray*}
   V =
   \left(
@@ -463,7 +463,7 @@
 <p class=

    • -

      These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the results of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.

      +

      These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the results of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.

    • @@ -607,10 +607,10 @@
      }
    • So if, again, this is not the code we use in step-8, what do we do there? The answer rests on the finite element we use. In step-8, we use the following element:

      FESystem<dim> finite_element (FE_Q<dim>(1), dim);
      -

      In other words, the finite element we use consists of dim copies of the same scalar element. This is what we call a primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that also derived quantities based on shape functions inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) +
+</div><!-- fragment --><p> In other words, the finite element we use consists of <code>dim</code> copies of the same scalar element. This is what we call a <a class=primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that also derived quantities based on shape functions inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) +
    \partial_y\varphi_y(x,y,z) + \partial_z\varphi_z(x,y,z)$ of a vector-valued shape function $\Phi(x,y,z)=(\varphi_x(x,y,z), \varphi_y(x,y,z), \varphi_z(x,y,z))^T$ is, in the present case, either $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z)$, $\mathrm{div}\ \Phi(x,y,z)=\partial_y\varphi_y(x,y,z)$, or $\mathrm{div}\ \Phi(x,y,z)=\partial_z\varphi_z(x,y,z)$, because exactly one of the $\varphi_\ast$ is nonzero. Knowing this means that we can save a number of computations that, if we were to do them, would only yield zeros to add up.

      In a similar vein, if only one component of a shape function is nonzero, then only one row of its gradient $\nabla\Phi$ is nonzero. What this means for terms like $(\mu \nabla\Phi_i,\nabla\Phi_j)$, where the scalar product between two tensors is defined as $(\tau, \gamma)_\Omega=\int_\Omega \sum_{i,j=1}^d \tau_{ij} \gamma_{ij}$, is that the term is only nonzero if both tensors have their nonzero entries in the same row, which means that the two shape functions have to have their single nonzero component in the same location.

      -

      If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be nonzero, in a second step avoid building the entire tensors and only get its nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

      +

      If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be nonzero, in a second step avoid building the entire tensors and only get its nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

      The vehicle for all this is the ability to determine which vector component is going to be nonzero. This information is provided by the FiniteElement::system_to_component_index function. What can be done with it, using the example above, is explained in detail in step-8.

      Block solvers

      Using techniques as shown above, it isn't particularly complicated to assemble the linear system, i.e. matrix and right hand side, for a vector-valued problem. However, then it also has to be solved. This is more complicated. Naively, one could just consider the matrix as a whole. For most problems, this matrix is not going to be definite (except for special cases like the elasticity equations covered in step-8 and step-17). It will, often, also not be symmetric. This rather general class of matrices presents problems for iterative solvers: the lack of structural properties prevents the use of most efficient methods and preconditioners. While it can be done, the solution process will therefore most often be slower than necessary.

      @@ -628,7 +628,7 @@ \right), \end{eqnarray*}" src="form_337.png"/>

      -

      where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

      +

      where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

      By default, this is not what happens, however. Rather, deal.II assigns numbers to degrees of freedom in a rather random manner. Consequently, if you form a vector out of the values of degrees of freedom will not be neatly ordered in a vector like

      \begin{eqnarray*}
   \left(
@@ -668,8 +668,8 @@
   MU = F-BP.
 \end{eqnarray*}

      -

      This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

      -

      How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

      +

      This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

      +

      How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

      This is where the BlockVector, BlockSparseMatrix, and similar classes come in. For all practical purposes, then can be used as regular vectors or sparse matrices, i.e. they offer element access, provide the usual vector operations and implement, for example, matrix-vector multiplications. In other words, assembling matrices and right hand sides works in exactly the same way as for the non-block versions. That said, internally they store the elements of vectors and matrices in "blocks"; for example, instead of using one large array, the BlockVector class stores it as a set of arrays each of which we call a block. The advantage is that, while the whole thing can be used as a vector, one can also access an individual block which then, again, is a vector with all the vector operations.

      To show how to do this, let us consider the second equation $MU=F-BP$ to be solved above. This can be achieved using the following sequence similar to what we have in step-20:

      Vector<double> tmp (solution.block(0).size());
      system_matrix.block(0,1).vmult (tmp, solution.block(1));
      @@ -689,7 +689,7 @@ -

      What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$ is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

      +

      What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$ is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

      Extracting data from solutions

      Once one has computed a solution, it is often necessary to evaluate it at quadrature points, for example to evaluate nonlinear residuals for the next Newton iteration, to evaluate the finite element residual for error estimators, or to compute the right hand side for the next time step in a time dependent problem.

      The way this is done us to again use an FEValues object to evaluate the shape functions at quadrature points, and with those also the values of a finite element function. For the example of the mixed Laplace problem above, consider the following code after solving:

      std::vector<Vector<double> > local_solution_values (n_q_points,
      /usr/share/doc/packages/dealii/doxygen/deal.II/index.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-12-27 18:25:15.780921640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-12-27 18:25:15.784921667 +0000 @@ -132,7 +132,7 @@
    • DoFHandler: DoFHandler objects are the confluence of triangulations and finite elements: the finite element class describes how many degrees of freedom it needs per vertex, line, or cell, and the DoFHandler class allocates this space so that each vertex, line, or cell of the triangulation has the correct number of them. It also gives them a global numbering.

      -

      A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ by an ordered set of coefficients $U_j$.

      +

      A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_i(\mathbf x)$ by an ordered set of coefficients $U_j$.

      Just as with triangulation objects, most operations on DoFHandlers are done by looping over all cells and doing something on each or a subset of them. The interfaces of the two classes are therefore rather similar: they allow to get iterators to the first and last cell (or face, or line, etc) and offer information through these iterators. The information that can be gotten from these iterators is the geometric and topological information that can already be gotten from the triangulation iterators (they are in fact derived classes) as well as things like the global numbers of the degrees of freedom on the present cell. On can also ask an iterator to extract the values corresponding to the degrees of freedom on the present cell from a data vector that stores values for all degrees of freedom associated with a triangulation.

      It is worth noting that, just as triangulations, DoFHandler classes do not know anything about the mapping from the unit cell to its individual cells. It is also ignorant of the shape functions that correspond to the degrees of freedom it manages: all it knows is that there are, for example, 2 degrees of freedom for each vertex and 4 per cell interior. Nothing about their specifics is relevant to the DoFHandler class with the exception of the fact that they exist.

      The DoFHandler class and its associates are described in the Degrees of Freedom topic. In addition, there are specialized versions that can handle multilevel and hp-discretizations. These are described in the Multilevel support and hp-finite element support topics. Finite element methods frequently imply constraints on degrees of freedom, such as for hanging nodes or nodes at which boundary conditions apply; dealing with such constraints is described in the Constraints on degrees of freedom topic.

      /usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-12-27 18:25:15.808921832 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-12-27 18:25:15.808921832 +0000 @@ -155,7 +155,7 @@
    • -

      Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

      IndexSet is (N);
      +

      Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

      IndexSet is (N);
      is.add_range(0, N);

      This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

      if (my_index_set == complete_index_set(my_index_set.size())
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-12-27 18:25:15.836922024 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-12-27 18:25:15.836922024 +0000 @@ -145,11 +145,11 @@ const std::vector< value_type > & children_values&#href_anchor"memdoc">

      Check if data on all children match, and return value of the first child.

      -\[
+<picture><source srcset=\[
   d_{K_p} = d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2234.png"/>

      @@ -173,13 +173,13 @@ const std::vector< value_type > & children_values&#href_anchor"memdoc">

      Return sum of data on all children.

      -\[
+<picture><source srcset=\[
   d_{K_p} = \sum d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2235.png"/>

      -

      This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

      +

      This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

      @@ -200,15 +200,15 @@ const std::vector< value_type > & children_values&#href_anchor"memdoc"> -

      Return $ l_2 $-norm of data on all children.

      +

      Return $ l_2 $-norm of data on all children.

      -\[
+<picture><source srcset=\[
   d_{K_p}^2 = \sum d_{K_c}^2
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2237.png"/>

      -

      This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.

      +

      This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.

      @@ -231,11 +231,11 @@ const std::vector< value_type > & children_values&#href_anchor"memdoc">

      Return mean value of data on all children.

      -\[
+<picture><source srcset=\[
   d_{K_p} = \sum d_{K_c} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2238.png"/>

      @@ -259,11 +259,11 @@ const std::vector< value_type > & children_values&#href_anchor"memdoc">

      Return maximum value of data on all children.

      -\[
+<picture><source srcset=\[
   d_{K_p} = \max \left( d_{K_c} \right)
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2239.png"/>

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-12-27 18:25:15.860922189 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-12-27 18:25:15.864922217 +0000 @@ -141,11 +141,11 @@ const value_type parent_value&#href_anchor"memdoc">

      Return a vector containing copies of data of the parent cell for each child.

      -\[
+<picture><source srcset=\[
   d_{K_c} = d_{K_p}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2231.png"/>

      @@ -169,13 +169,13 @@ const value_type parent_value&#href_anchor"memdoc">

      Return a vector which contains data of the parent cell being equally divided among all children.

      -\[
+<picture><source srcset=\[
   d_{K_c} = d_{K_p} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2232.png"/>

      -

      This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.

      +

      This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.

      @@ -198,13 +198,13 @@ const value_type parent_value&#href_anchor"memdoc">

      Return a vector which contains squared data of the parent cell being equally divided among the squares of all children.

      -\[
+<picture><source srcset=\[
   d_{K_c}^2 = d_{K_p}^2 / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
-\] +\]" src="form_2233.png"/>

      -

      This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

      +

      This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-12-27 18:25:15.888922381 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-12-27 18:25:15.888922381 +0000 @@ -128,7 +128,7 @@
      -

      The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

      +

      The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

      By passing a set of enums of the current kind to the DataOut_DoFData::add_data_vector functions, this can be achieved.

      See the step-22 tutorial program for an example on how this information can be used in practice.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-12-27 18:25:15.940922739 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-12-27 18:25:15.944922766 +0000 @@ -551,7 +551,7 @@

      While this discussion applies to two spatial dimensions, it is more complicated in 3d. The reason is that we could still use patches, but it is difficult when trying to visualize them, since if we use a cut through the data (by, for example, using x- and z-coordinates, a fixed y-value and plot function values in z-direction, then the patched data is not a patch in the sense GNUPLOT wants it any more. Therefore, we use another approach, namely writing the data on the 3d grid as a sequence of lines, i.e. two points each associated with one or more data sets. There are therefore 12 lines for each subcells of a patch.

      Given the lines as described above, a cut through this data in Gnuplot can then be achieved like this:

      *   set data style lines
       *   splot [:][:][0:] "T" using 1:2:(\$3==.5 ? \$4 : -1)
      -* 

      This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$- $y$-plane (we assume here a positive solution, if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values (&3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set (&4) are raised in z-direction above the x-y-plane; all other points are denoted the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

      +*

      This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$- $y$-plane (we assume here a positive solution, if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values (&3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set (&4) are raised in z-direction above the x-y-plane; all other points are denoted the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

      More complex cuts are possible, including nonlinear ones. Note however, that only those points which are actually on the cut-surface are plotted.

      Definition at line 3556 of file data_out_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-12-27 18:25:15.972922958 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-12-27 18:25:15.980923013 +0000 @@ -133,17 +133,17 @@

      Detailed Description

      This namespace provides functions that compute a cell-wise approximation of the norm of a derivative of a finite element field by taking difference quotients between neighboring cells. This is a rather simple but efficient form to get an error indicator, since it can be computed with relatively little numerical effort and yet gives a reasonable approximation.

      -

      The way the difference quotients are computed on cell $K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\|
-}$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot
-\frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{
-\|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| }
+<p>The way the difference quotients are computed on cell <picture><source srcset=$K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\|
+}$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot
+\frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{
+\|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| }
 \right) \nabla u(x_K) \approx \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|}
-\frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$

      -

      Thus, if the matrix $ Y =  \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|}
-\frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1}
+\frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$

      +

      Thus, if the matrix $ Y =  \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|}
+\frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1}
 \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{
-\|y_{K'}\| } \right).$ This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

      -

      The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

      --------------------------------------------------------
      +\|y_{K'}\| } \right).$" src="form_2259.png"/> This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

      +

      The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

      --------------------------------------------------------
      An error occurred in line <749>
      of file <source/numerics/derivative_approximation.cc> in function
      void DerivativeApproximation::approximate(...)
      @@ -161,19 +161,19 @@
      DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)

      As can easily be verified, this can only happen on very coarse grids, when some cells and all their neighbors have not been refined even once. You should therefore only call the functions of this class if all cells are at least once refined. In practice this is not much of a restriction.

      Approximation of higher derivatives

      -

      Similar to the reasoning above, approximations to higher derivatives can be computed in a similar fashion. For example, the tensor of second derivatives is approximated by the formula $ \nabla^2 u(x_K) \approx Y^{-1}
+<p>Similar to the reasoning above, approximations to higher derivatives can be computed in a similar fashion. For example, the tensor of second derivatives is approximated by the formula   <picture><source srcset=$ \nabla^2 u(x_K) \approx Y^{-1}
 \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \otimes \frac{\nabla u_h(x_{K'})
-- \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $ where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T
-\nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, as result we take the symmetrized form, which is the mean value of the approximation and its transpose.

      -

      The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

      +- \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $" src="form_2261.png"/> where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T
+\nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, as result we take the symmetrized form, which is the mean value of the approximation and its transpose.

      +

      The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

      Even higher than the second derivative can be obtained along the same lines as exposed above.

      Refinement indicators based on the derivatives

      -

      If you would like to base a refinement criterion upon these approximation of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2}
-\le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2}
-\|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

      +

      If you would like to base a refinement criterion upon these approximation of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2}
+\le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2}
+\|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

      Likewise, for the second derivative, one should choose a power of the mesh size $h$ one higher than for the gradient.

      Implementation

      -

      The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (names Gradient and SecondDerivative) and the main algorithm is simply passed one or the other data types and asks them to perform the order dependent operations. The main framework that is independent of this, such as finding all active neighbors, or setting up the matrix $Y$ is done in the main function approximate.

      +

      The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (names Gradient and SecondDerivative) and the main algorithm is simply passed one or the other data types and asks them to perform the order dependent operations. The main framework that is independent of this, such as finding all active neighbors, or setting up the matrix $Y$ is done in the main function approximate.

      Due to this way of operation, the class may be easily extended for higher order derivatives than are presently implemented. Basically, only an additional class along the lines of the derivative descriptor classes Gradient and SecondDerivative has to be implemented, with the respective alias and functions replaced by the appropriate analogues for the derivative that is to be approximated.

      Function Documentation

      @@ -281,7 +281,7 @@ const unsigned int component = 0&#href_anchor"memdoc"> -

      This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

      +

      This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

      The last parameter denotes the solution component, for which the gradient is to be computed. It defaults to the first component. For scalar elements, this is the only valid choice; for vector-valued ones, any component between zero and the number of vector components can be given here.

      In a parallel computation the solution vector needs to contain the locally relevant unknowns.

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-12-27 18:25:16.096923810 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-12-27 18:25:16.104923865 +0000 @@ -2520,7 +2520,7 @@

      Return an Expression representing a scalar symbolic variable with the identifier specified by symbol.

      -

      For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.

      +

      For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.

      Parameters
      @@ -3660,7 +3660,7 @@

      Return a substitution map that has any explicit interdependencies between the entries of the input substitution_map resolved.

      The force_cyclic_dependency_resolution flag exists to ensure, if desired, that no cyclic dependencies can exist in the returned map. If a cyclic dependency exists in the input substitution map, substitution_map, then with this flag set to true the dependency cycle is broken by a dictionary-ordered substitution. For example, if the substitution map contains two entries map["a"] -> "b" and map["b"] -> "a", then the result of calling this function would be a map with the elements map["a"] -> "a" and map["b"] -> "a".

      If one symbol is an explicit function of another, and it is desired that all their values are completely resolved, then it may be necessary to perform substitution a number of times before the result is finalized. This function performs substitution sweeps for a set of symbolic variables until all explicit relationships between the symbols in the map have been resolved. Whether each entry returns a symbolic or real value depends on the nature of the values stored in the substitution map. If the values associated with a key are also symbolic then the returned result may still be symbolic in nature. The terminal result of using the input substitution map, symbol_values, is then guaranteed to be rendered by a single substitution of the returned dependency-resolved map.

      -

      Example: If map["a"] -> 1 and map["b"] -> "a"+ 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

      +

      Example: If map["a"] -> 1 and map["b"] -> "a"+ 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

      @@ -3720,11 +3720,11 @@ If the symbols stored in the map are explicitly dependent on one another, then the returned result depends on the order in which the map is traversed. It is recommended to first resolve all interdependencies in the map using the resolve_explicit_dependencies() function.

      Examples:

      1. -

        If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

        +

        If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

      2. -If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
      3. +If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b): = a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first followed by the symbol "b".
      @@ -3875,7 +3875,7 @@
      [in]symbolAn identifier (or name) for the returned symbolic variable.

      Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

      -

      For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

      +

      For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

      Template Parameters
      @@ -3950,7 +3950,7 @@
      dimThe dimension of the returned tensor.

      Return a symmetric tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

      -

      For example, if the symbol is the string "S" then the tensorial symbolic variable that is returned represents the vector $S$. Each component of $S$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

      +

      For example, if the symbol is the string "S" then the tensorial symbolic variable that is returned represents the vector $S$. Each component of $S$ is prefixed by the given symbol, and has a suffix that indicates its component indices.

      Template Parameters
      @@ -4115,7 +4115,7 @@
      rankThe rank of the returned tensor.
      -
      Returns
      The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
      +
      Returns
      The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
      @@ -4144,7 +4144,7 @@
    -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4173,7 +4173,7 @@
    -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4202,7 +4202,7 @@
    -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4231,7 +4231,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4260,7 +4260,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4289,7 +4289,7 @@ -
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    +
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4318,7 +4318,7 @@ -
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    +
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4347,8 +4347,8 @@ -
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
-\mathbf{T}_{2}}$.
    +
    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial
+\mathbf{T}_{2}}$.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-12-27 18:25:16.164924277 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-12-27 18:25:16.168924304 +0000 @@ -236,13 +236,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids and since even an interior degree of freedom may be a better starting point, giving the starting point by the user may be a viable way if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), searching a best starting point may be difficult, however, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    -

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    +

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degree of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill-McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function allows not only to honor enumeration based on vector components, but also allows to group together vector components into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim, // dim velocities
    FE_Q<dim>(1), 1); // one pressure
    Definition fe_system.h:208
    Definition fe_q.h:554
    -

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    +

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.

    @@ -255,7 +255,7 @@

    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider what the different sparsity patterns produced by the various algorithms when using the $Q_2^d\times
-Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    +Q_1$" src="form_991.png"/> element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU to preconditioner for the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number the better the preconditioner and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering than with another one, then this means that the actual solver is actually several times faster.

    @@ -467,7 +467,7 @@
    -

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.

    If one of the base finite elements from which the global finite element under consideration here, is a non-primitive one, i.e. its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.

    @@ -561,7 +561,7 @@
    -

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    +

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 997 of file dof_renumbering.cc.

    @@ -646,7 +646,7 @@
  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-12-27 18:25:16.248924853 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-12-27 18:25:16.256924908 +0000 @@ -325,7 +325,7 @@

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non- homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma
-\varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by map_dof_to_boundary_indices() function).

    +\varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by the map_dof_to_boundary_indices() function).

    DoF coupling between surface triangulations and bulk triangulations

    When working with Triangulation and DoFHandler objects of different co-dimension, such as a Triangulation<2,3>, describing (part of) the boundary of a Triangulation<3>, and their corresponding DoFHandler objects, one often needs to build a one-to-one matching between the degrees of freedom that live on the surface Triangulation and those that live on the boundary of the bulk Triangulation. The GridGenerator::extract_boundary_mesh() function returns a mapping of surface cell iterators to face iterators, that can be used by the function map_boundary_to_bulk_dof_iterators() to construct a map between cell iterators of the surface DoFHandler, and the corresponding pair of cell iterator and face index of the bulk DoFHandler. Such a map can be used to initialize FEValues and FEFaceValues for the corresponding DoFHandler objects. Notice that one must still ensure that the ordering of the quadrature points coincides in the two objects, in order to build a coupling matrix between the two systems.

    Enumeration Type Documentation

    @@ -500,7 +500,7 @@

    Here, combined_orientation is the relative orientation of face_1 with respect to face_2. This is typically computed by GridTools::orthogonal_equality().

    Optionally a matrix matrix along with a std::vector first_vector_components can be specified that describes how DoFs on face_1 should be modified prior to constraining to the DoFs of face_2. Here, two declarations are possible: If the std::vector first_vector_components is non empty the matrix is interpreted as a dim $\times$ dim rotation matrix that is applied to all vector valued blocks listed in first_vector_components of the FESystem. If first_vector_components is empty the matrix is interpreted as an interpolation matrix with size no_face_dofs $\times$ no_face_dofs.

    This function makes sure that identity constraints don't create cycles in constraints.

    -

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    +

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.

    Detailed information can be found in the see Glossary entry on periodic boundary conditions.

    Definition at line 3591 of file dof_tools_constraints.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-12-27 18:25:16.276925046 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-12-27 18:25:16.280925073 +0000 @@ -178,7 +178,7 @@
    -

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    +

    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).

    Definition at line 29 of file fe_series.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-12-27 18:25:16.328925403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-12-27 18:25:16.328925403 +0000 @@ -395,17 +395,17 @@

    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    -

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    +

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    \begin{align*}
   \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
 \end{align*}

    -

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    +

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    \begin{align*}
   I = C X^T
 \end{align*}

    -

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    +

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    This function therefore computes this matrix $X$, for the following specific circumstances:

    -

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    +

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-12-27 18:25:16.352925567 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-12-27 18:25:16.356925595 +0000 @@ -139,10 +139,10 @@
    -

    An enum that describes the outcome of comparing two elements for mutual domination. If one element dominates another, then the restriction of the space described by the dominated element to a face of the cell is strictly larger than that of the dominating element. For example, in 2-d Q(2) elements dominate Q(4) elements, because the traces of Q(4) elements are quartic polynomials which is a space strictly larger than the quadratic polynomials (the restriction of the Q(2) element). Similar reasonings apply for vertices and cells as well. In general, Q(k) dominates Q(k') if $k\le k'$.

    +

    An enum that describes the outcome of comparing two elements for mutual domination. If one element dominates another, then the restriction of the space described by the dominated element to a face of the cell is strictly larger than that of the dominating element. For example, in 2-d Q(2) elements dominate Q(4) elements, because the traces of Q(4) elements are quartic polynomials which is a space strictly larger than the quadratic polynomials (the restriction of the Q(2) element). Similar reasonings apply for vertices and cells as well. In general, Q(k) dominates Q(k') if $k\le k'$.

    This enum is used in the FiniteElement::compare_for_domination() function that is used in the context of hp-finite element methods when determining what to do at faces where two different finite elements meet (see the hp-paper for a more detailed description of the following). In that case, the degrees of freedom of one side need to be constrained to those on the other side. The determination which side is which is based on the outcome of a comparison for mutual domination: the dominated side is constrained to the dominating one.

    -

    Note that there are situations where neither side dominates. The hp-paper lists two cases, with the simpler one being that a $Q_2\times Q_1$ vector-valued element (i.e. a FESystem(FE_Q(2),1,FE_Q(1),1)) meets a $Q_1\times Q_2$ element: here, for each of the two vector-components, we can define a domination relationship, but it is different for the two components.

    -

    It is clear that the concept of domination doesn't matter for discontinuous elements. However, discontinuous elements may be part of vector-valued elements and may therefore be compared against each other for domination. They should return either_element_can_dominate in that case. Likewise, when comparing two identical finite elements, they should return this code; the reason is that we can not decide which element will dominate at the time we look at the first component of, for example, two $Q_2\times Q_1$ and $Q_2\times Q_2$ elements, and have to keep our options open until we get to the second base element.

    +

    Note that there are situations where neither side dominates. The hp-paper lists two cases, with the simpler one being that a $Q_2\times Q_1$ vector-valued element (i.e. a FESystem(FE_Q(2),1,FE_Q(1),1)) meets a $Q_1\times Q_2$ element: here, for each of the two vector-components, we can define a domination relationship, but it is different for the two components.

    +

    It is clear that the concept of domination doesn't matter for discontinuous elements. However, discontinuous elements may be part of vector-valued elements and may therefore be compared against each other for domination. They should return either_element_can_dominate in that case. Likewise, when comparing two identical finite elements, they should return this code; the reason is that we can not decide which element will dominate at the time we look at the first component of, for example, two $Q_2\times Q_1$ and $Q_2\times Q_2$ elements, and have to keep our options open until we get to the second base element.

    Finally, the code no_requirements exists for cases where elements impose no continuity requirements. The case is primarily meant for FE_Nothing which is an element that has no degrees of freedom in a subdomain. It could also be used by discontinuous elements, for example.

    More details on domination can be found in the hp-paper.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-12-27 18:25:16.376925732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-12-27 18:25:16.380925760 +0000 @@ -143,12 +143,12 @@ - + - + - + - +
    const unsigned int component = 0&#href_anchor"memdoc"> -

    Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

    +

    Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

    Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

    The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

    $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i
    + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

    -

    Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

    +

    Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

    $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

    If the function has more than 1 component the component parameter can be used to specify which function component the bounds should be computed for.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-12-27 18:25:16.472926391 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-12-27 18:25:16.480926446 +0000 @@ -281,7 +281,7 @@
    const bool colorize = false&#href_anchor"memdoc"> -

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    +

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().

    @@ -725,7 +725,7 @@
    const bool colorize = false&#href_anchor"memdoc"> -

    Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      +

      Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
      2. @@ -749,10 +749,10 @@
        Parameters
        - + - +
        triaTriangulation to be created. Must be empty upon calling this function.
        shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
        shell_region_widthWidth of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
        n_shellsNumber of shells to use in the shell layer.
        skewnessParameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
        colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
        colorizeIf true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
        @@ -1123,12 +1123,12 @@
    const unsigned int n_rotate_middle_square&#href_anchor"memdoc"> -

    Generate a 2d mesh consisting of five squares arranged in a plus-shape. Depending on the number n_rotate_middle_square passed the middle square is rotated by a degree of n_rotate_middle_square $\pi/2$. This way one can generate a mesh in which the middle square contains edges that have the opposite tangential and/or opposite normal orientation compared to the neighboring edges of the other squares.

    +

    Generate a 2d mesh consisting of five squares arranged in a plus-shape. Depending on the number n_rotate_middle_square passed the middle square is rotated by a degree of n_rotate_middle_square $\pi/2$. This way one can generate a mesh in which the middle square contains edges that have the opposite tangential and/or opposite normal orientation compared to the neighboring edges of the other squares.

    This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

    Parameters
    - +
    [out]triaThe input triangulation.
    [in]n_rotate_middle_squarenumber of rotations in [0,4) of right square by $\pi/2$.
    [in]n_rotate_middle_squarenumber of rotations in [0,4) of right square by $\pi/2$.
    @@ -1165,7 +1165,7 @@
    const bool manipulate_left_cube&#href_anchor"memdoc"> -

    Generate a 3d mesh consisting of the unit cube joined with a copy shifted by $s = (1,0,0)$. Depending on the flags passed either the right or the left cube (when looking at the positively oriented (x,z)-plane) contains a face that is either not in standard orientation and/or is rotated by either $\pi/2$, $\pi$ or $3/2\pi$.

    +

    Generate a 3d mesh consisting of the unit cube joined with a copy shifted by $s = (1,0,0)$. Depending on the flags passed either the right or the left cube (when looking at the positively oriented (x,z)-plane) contains a face that is either not in standard orientation and/or is rotated by either $\pi/2$, $\pi$ or $3/2\pi$.

    This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

    Parameters
    @@ -1316,7 +1316,7 @@ - +
    const double half_length = 1.&#href_anchor"memdoc"> -

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    +

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

    Precondition
    The triangulation passed as argument needs to be empty when calling this function.
    @@ -1351,7 +1351,7 @@
    const double half_length = 1.&#href_anchor"memdoc"> -

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

    +

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

    @@ -1457,7 +1457,7 @@
    triaAn empty triangulation which will hold the pipe junction geometry.
    openingsCenter point and radius of each of the three openings. The container has to be of size three.
    bifurcationCenter point of the bifurcation and hypothetical radius of each truncated cone at the bifurcation.
    aspect_ratioAspect ratio of cells, specified as radial over z-extension. Default ratio is $\Delta r/\Delta z = 1/2$.
    aspect_ratioAspect ratio of cells, specified as radial over z-extension. Default ratio is $\Delta r/\Delta z = 1/2$.
    @@ -1472,13 +1472,13 @@
    Point Radius
    Openings $(2,0,0)$ $1$
    Openings $(2,0,0)$ $1$
    $(0,2,0)$ $1$
    $(0,2,0)$ $1$
    $(0,0,2)$ $1$
    $(0,0,2)$ $1$
    Bifurcation $(0,0,0)$ $1$
    Bifurcation $(0,0,0)$ $1$
    @@ -1491,13 +1491,13 @@ Point Radius -Openings $(-2,0,0)$ $1$ +Openings $(-2,0,0)$ $1$ -$(0,2,0)$ $1$ +$(0,2,0)$ $1$ -$(2,0,0)$ $1$ +$(2,0,0)$ $1$ -Bifurcation $(0,0,0)$ $1$ +Bifurcation $(0,0,0)$ $1$
    @@ -1510,13 +1510,13 @@ Point Radius -Openings $(-2,0,0)$ $1$ +Openings $(-2,0,0)$ $1$ -$(1,\sqrt{3},0)$ $1$ +$(1,\sqrt{3},0)$ $1$ -$(1,-\sqrt{3},0)$ $1$ +$(1,-\sqrt{3},0)$ $1$ -Bifurcation $(0,0,0)$ $1$ +Bifurcation $(0,0,0)$ $1$

    Definition at line 266 of file grid_generator_pipe_junction.cc.

    @@ -1551,7 +1551,7 @@
    Parameters
    - +
    triaA Triangulation object which has to be empty.
    sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
    sizesA vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
    colorize_cellsIf colorization is enabled, then the material id of a cells corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
    @@ -1738,9 +1738,9 @@
  • 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choce is equivalent to the rhombic dodecahedron after performing one global refinement.
  • -Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.
  • +Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively. -

    The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

    +

    The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

    The 3d grids with 12 and 96 cells are plotted below:

    @@ -1893,7 +1893,7 @@ const bool colorize = false&#href_anchor"memdoc">

    Produce a domain that is the intersection between a hyper-shell with given inner and outer radius, i.e. the space between two circles in two space dimensions and the region between two spheres in 3d, and the positive quadrant (in 2d) or octant (in 3d). In 2d, this is indeed a quarter of the full annulus, while the function is a misnomer in 3d because there the domain is not a quarter but one eighth of the full shell.

    If the number of initial cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio in 2d.

    -

    If colorize is set to true, the inner, outer, left, and right boundary get indicator 0, 1, 2, and 3 in 2d, respectively. Otherwise all indicators are set to 0. In 3d indicator 2 is at the face $x=0$, 3 at $y=0$, 4 at $z=0$ (see the glossary entry on colorization).

    +

    If colorize is set to true, the inner, outer, left, and right boundary get indicator 0, 1, 2, and 3 in 2d, respectively. Otherwise all indicators are set to 0. In 3d indicator 2 is at the face $x=0$, 3 at $y=0$, 4 at $z=0$ (see the glossary entry on colorization).

    All manifold ids are set to zero, and a SphericalManifold is attached to the triangulation.

    Precondition
    The triangulation passed as argument needs to be empty when calling this function.
    @@ -1941,7 +1941,7 @@ const bool colorize = false&#href_anchor"memdoc"> -

    Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

    +

    Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

    If n_radial_cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio. The same holds for n_axial_cells.

    If colorize is set to true, a boundary id of 0 is set for the inner cylinder, a boundary id of 1 is set for the outer cylinder, a boundary id of 2 is set for the bottom (z-) boundary and a boundary id of 3 is set for the top (z+) boundary.

    Note
    Although this function is declared as a template, it does not make sense in 1d and 2d. Also keep in mind that this object is rotated and positioned differently than the one created by cylinder().
    @@ -1989,9 +1989,9 @@
    -

    Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

    +

    Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

    If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

    -

    An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

    +

    An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

    @@ -2003,7 +2003,7 @@ centerline_radiusThe radius of the circle which forms the center line of the torus containing the loop of cells. Must be greater than inner_radius. inner_radiusThe distance between the inner edge of the torus and origin. n_cells_toroidalOptional argument to set the number of cell layers in toroidal direction. The default is 6 cell layers. - phiOptional argument to generate an open torus with angle $0 < \varphi <= 2 \pi$. The default value is $2 \pi$, in which case a closed torus is generated. If the torus is open, the torus is cut at two planes perpendicular to the torus centerline. The center of these two planes are located at $(x_1, y_1, z_1) = (R, 0, 0)$ and $(x_2, y_2, z_2) = (R \cos(\varphi), 0, R \sin(\varphi))$. + phiOptional argument to generate an open torus with angle $0 < \varphi <= 2 \pi$. The default value is $2 \pi$, in which case a closed torus is generated. If the torus is open, the torus is cut at two planes perpendicular to the torus centerline. The center of these two planes are located at $(x_1, y_1, z_1) = (R, 0, 0)$ and $(x_2, y_2, z_2) = (R \cos(\varphi), 0, R \sin(\varphi))$.
    @@ -2048,8 +2048,8 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-12-27 18:25:16.512926666 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-12-27 18:25:16.520926721 +0000 @@ -340,12 +340,12 @@ const unsigned int order = 2&#href_anchor"memdoc">

    This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

    -

    With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

    +

    With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

    \[
   N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
 \]

    -

    cells ( $N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

    +

    cells ( $N_0-m$ are not refined, and each of the $m$ cells we refine yield $2^d$ child cells. On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

    \[
   \eta^\text{exp}(m)
   =
@@ -354,8 +354,8 @@
   \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
 \]

    -

    where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

    -

    This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

    +

    where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

    +

    This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

    \[
   J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
 \] @@ -364,7 +364,7 @@

    The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, Nr. 3, p. 503-534 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhauser, 2003.)

    -

    Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    +

    Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea, that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    Note
    This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

    Definition at line 447 of file grid_refinement.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-12-27 18:25:16.628927463 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-12-27 18:25:16.636927518 +0000 @@ -469,7 +469,7 @@ Triangulation< dim, spacedim > & triangulation&#href_anchor"memdoc">

    Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

    -

    The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

    Triangulation<dim> triangulation;
    +

    The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look like as follows:

    Triangulation<dim> triangulation;
    ... // fill triangulation with something
    {
    @@ -649,13 +649,13 @@ const bool solve_for_absolute_positions = false&#href_anchor"memdoc">

    Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

    -

    The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

    -\[ \min\, \int \frac{1}{2}
+<p>The unknown displacement field <picture><source srcset=$u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

    +\[ \min\, \int \frac{1}{2}
   c(\mathbf x)
   \mathbf \nabla u_d(\mathbf x) \cdot
   \mathbf \nabla u_d(\mathbf x)
   \,\rm d x
-\] +\]" src="form_1463.png"/>

    subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.

    Parameters
    @@ -2126,7 +2126,7 @@

    This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

    The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

    -

    While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    +

    While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

    In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

    Note
    If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
    @@ -2549,7 +2549,7 @@ const FullMatrix< double > & matrix = FullMatrix<double>()&#href_anchor"memdoc">

    An orthogonal equality test for faces.

    face1 and face2 are considered equal, if a one to one matching between its vertices can be achieved via an orthogonal equality relation. If no such relation exists then the returned std::optional object is empty (i.e., has_value() will return false).

    -

    Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

    +

    Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

    If the matching was successful, the relative orientation of face1 with respect to face2 is returned a std::optional<unsigned char>, in which the stored value is the same orientation bit format used elsewhere in the library. More information on that topic can be found in the glossary article.

    Definition at line 2426 of file grid_tools_dof_handlers.cc.

    @@ -2600,8 +2600,8 @@

    This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

    The unsigned char that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

    The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

    -

    The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

    -

    Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

    +

    The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

    +

    Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

    Template Parameters
    @@ -3131,8 +3131,8 @@
    MeshTypeA type that satisfies the requirements of the MeshType concept.
    -

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    +

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters
    @@ -3164,8 +3164,8 @@ const Mapping< dim, spacedim > & mapping&#href_anchor"memdoc"> -

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
-\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

    +

    Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1
+\; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters
    @@ -3303,8 +3303,8 @@
    -

    This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
-p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

    +

    This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A
+p_\text{unit} + b $ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

    For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    This approximation is underlying the function TriaAccessor::real_to_unit_cell_affine_approximation() function.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    @@ -3335,7 +3335,7 @@ const Quadrature< dim > & quadrature&#href_anchor"memdoc"> -

    Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    +

    Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    Note
    Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
    Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
    @@ -3537,7 +3537,7 @@ const double tol = 1e-12&#href_anchor"memdoc">

    Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

    -

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    +

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.

    Definition at line 348 of file grid_tools_topology.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 2024-12-27 18:25:16.668927737 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 2024-12-27 18:25:16.672927765 +0000 @@ -161,7 +161,7 @@
    -

    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and the gradient $g(x\_low)$, return the local minimizer of the quadratic interpolation function.

    +

    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and the gradient $g(x\_low)$, return the local minimizer of the quadratic interpolation function.

    The return type is optional to fit with similar functions that may not have a solution for given parameters.

    @@ -206,7 +206,7 @@
    -

    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and its gradients ( $g(x\_low)*g(x\_hi) < 0$) at those points, return the local minimizer of the cubic interpolation function (that is, the location where the cubic interpolation function attains its minimum value).

    +

    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and its gradients ( $g(x\_low)*g(x\_hi) < 0$) at those points, return the local minimizer of the cubic interpolation function (that is, the location where the cubic interpolation function attains its minimum value).

    The return type is optional as the real-valued solution might not exist.

    @@ -256,7 +256,7 @@
    -

    Find the minimizer of a cubic polynomial that goes through the points $f\_low=f(x\_low)$, $f\_hi=f(x\_hi)$ and $f\_rec(x\_rec)$ and has derivatve $g\_low$ at $x\_low$.

    +

    Find the minimizer of a cubic polynomial that goes through the points $f\_low=f(x\_low)$, $f\_hi=f(x\_hi)$ and $f\_rec(x\_rec)$ and has derivatve $g\_low$ at $x\_low$.

    The return type is optional as the real-valued solution might not exist.

    @@ -438,15 +438,15 @@ const bool debug_output = false&#href_anchor"memdoc"> -

    Perform a line search in $(0,max]$ with strong Wolfe conditions

    -\[
+<p>Perform a line search in <picture><source srcset=$(0,max]$ with strong Wolfe conditions

    +\[
 f(\alpha) \le f(0) + \alpha \mu f'(0) \\
 |f'(\alpha)| \le \eta |f'(0)|
-\] +\]" src="form_2502.png"/>

    using the one dimensional function func in conjunction with a function interpolate to choose a new point from the interval based on the function values and derivatives at its ends. The parameter a1 is a trial estimate of the first step. Interpolation can be done using either poly_fit() or poly_fit_three_points(), or any other function that has a similar signature.

    The function implements Algorithms 2.6.2 and 2.6.4 on pages 34-35 in [Fletcher2013]. These are minor variations of Algorithms 3.5 and 3.6 on pages 60-61 in [Nocedal2006]. It consists of a bracketing phase and a zoom phase, where interpolate is used.

    -

    Two examples of use might be as follows: In the first example, we wish to find the minimum of the function $100 * x^4 + (1-x)^2$. To find the approximate solution using line search with a polynomial fit to the curve one would perform the following steps:

    +

    Two examples of use might be as follows: In the first example, we wish to find the minimum of the function $100 * x^4 + (1-x)^2$. To find the approximate solution using line search with a polynomial fit to the curve one would perform the following steps:

    auto func = [](const double x)
    {
    const double f = 100. * std::pow(x, 4) + std::pow(1. - x, 2); // Value
    @@ -570,7 +570,7 @@ funcA one dimensional function which returns value and derivative at the given point. f0The function value at the origin. g0The function derivative at the origin. - interpolateA function which determines how interpolation is done during the zoom phase. It takes values and derivatives at the current interval/bracket ( $f\_low$, $f\_hi$) as well as up to 5 values and derivatives at previous steps. The returned value is to be provided within the given bounds. + interpolateA function which determines how interpolation is done during the zoom phase. It takes values and derivatives at the current interval/bracket ( $f\_low$, $f\_hi$) as well as up to 5 values and derivatives at previous steps. The returned value is to be provided within the given bounds. a1Initial trial step for the bracketing phase. etaA parameter in the second Wolfe condition (curvature condition). muA parameter in the first Wolfe condition (sufficient decrease). /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-12-27 18:25:16.700927957 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2024-12-27 18:25:16.704927984 +0000 @@ -141,9 +141,9 @@

    The namespace L2 contains functions for mass matrices and L2-inner products.

    Notational conventions

    In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

    -\[
+<picture><source srcset=\[
   \int_Z u \otimes v \,dx,
-\] +\]" src="form_1639.png"/>

    it will yield the following results, depending on the type of operation

    -

    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    +

    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    Signature of functions

    Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

    template <int dim>
    void
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-12-27 18:25:16.736928204 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html 2024-12-27 18:25:16.740928232 +0000 @@ -175,8 +175,8 @@ const double factor = 1.&#href_anchor"memdoc">

    Advection along the direction w in weak form with derivative on the test function

    -\[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i
-\, dx. \] +\[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i
+\, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the advection operator is applied to each component separately.

    Parameters
    @@ -235,7 +235,7 @@

    Scalar advection residual operator in strong form

    -\[ r_i = \int_Z  (\mathbf w \cdot \nabla)u\, v_i \, dx. \] +\[ r_i = \int_Z  (\mathbf w \cdot \nabla)u\, v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.
    @@ -284,8 +284,8 @@

    Vector-valued advection residual operator in strong form

    -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
-\cdot\mathbf v_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
+\cdot\mathbf v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.
    @@ -334,7 +334,7 @@

    Scalar advection residual operator in weak form

    -\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \] +\[ r_i = \int_Z  (\mathbf w \cdot \nabla)v\, u_i \, dx. \]

    Definition at line 215 of file advection.h.

    @@ -382,8 +382,8 @@

    Vector-valued advection residual operator in weak form

    -\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
-\cdot\mathbf u_i \, dx. \] +\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
+\cdot\mathbf u_i \, dx. \]

    Definition at line 255 of file advection.h.

    @@ -423,11 +423,11 @@ double factor = 1.&#href_anchor"memdoc">

    Upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and zero else:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 [\mathbf w\cdot\mathbf n]_+
 u_i v_j \, ds
-\] +\]" src="form_1593.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -481,13 +481,13 @@

    Scalar case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1594.png"/>

    -

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    +

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -540,13 +540,13 @@

    Vector-valued case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
-\] +\]" src="form_1594.png"/>

    -

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    +

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

    @@ -612,13 +612,13 @@ const double factor = 1.&#href_anchor"memdoc">

    Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1596.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    @@ -675,13 +675,13 @@ const double factor = 1.&#href_anchor"memdoc">

    Scalar case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1596.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    @@ -738,13 +738,13 @@ const double factor = 1.&#href_anchor"memdoc">

    Vector-valued case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

    -\[
+<picture><source srcset=\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
-\] +\]" src="form_1596.png"/>

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-12-27 18:25:16.776928479 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 2024-12-27 18:25:16.784928534 +0000 @@ -170,7 +170,7 @@ double factor = 1.&#href_anchor"memdoc">

    Cell matrix for divergence. The derivative is on the trial function.

    -\[ \int_Z v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    @@ -206,8 +206,8 @@ const double factor = 1.&#href_anchor"memdoc">

    The residual of the divergence operator in strong form.

    -\[ \int_Z
-v\nabla \cdot \mathbf u \,dx \] +\[ \int_Z
+v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

    @@ -244,8 +244,8 @@ const double factor = 1.&#href_anchor"memdoc">

    The residual of the divergence operator in weak form.

    -\[ - \int_Z
-\nabla v \cdot \mathbf u \,dx \] +\[ - \int_Z
+\nabla v \cdot \mathbf u \,dx \]

    This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

    Todo
    Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.
    @@ -282,8 +282,8 @@ double factor = 1.&#href_anchor"memdoc">

    Cell matrix for gradient. The derivative is on the trial function.

    -\[
-\int_Z \nabla u \cdot \mathbf v\,dx \] +\[
+\int_Z \nabla u \cdot \mathbf v\,dx \]

    This is the strong gradient and the trial space should be at least in H1. The test functions can be discontinuous.

    @@ -319,8 +319,8 @@ const double factor = 1.&#href_anchor"memdoc">

    The residual of the gradient operator in strong form.

    -\[ \int_Z
-\mathbf v\cdot\nabla u \,dx \] +\[ \int_Z
+\mathbf v\cdot\nabla u \,dx \]

    This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

    The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

    @@ -357,8 +357,8 @@ const double factor = 1.&#href_anchor"memdoc">

    The residual of the gradient operator in weak form.

    -\[ -\int_Z
-\nabla\cdot \mathbf v u \,dx \] +\[ -\int_Z
+\nabla\cdot \mathbf v u \,dx \]

    This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

    Todo
    Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
    @@ -395,7 +395,7 @@ double factor = 1.&#href_anchor"memdoc">

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

    -\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \] +\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

    Definition at line 258 of file divergence.h.

    @@ -435,9 +435,9 @@ double factor = 1.&#href_anchor"memdoc">

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

    -\[
+<picture><source srcset=\[
 \int_F (\mathbf u\cdot \mathbf n) v \,ds
-\] +\]" src="form_1604.png"/>

    Definition at line 291 of file divergence.h.

    @@ -472,9 +472,9 @@ double factor = 1.&#href_anchor"memdoc">

    The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

    -\[
+<picture><source srcset=\[
 \int_F u (\mathbf v\cdot \mathbf n) \,ds
-\] +\]" src="form_1605.png"/>

    Definition at line 323 of file divergence.h.

    @@ -534,10 +534,10 @@ double factor = 1.&#href_anchor"memdoc">

    The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

    -\[
+<picture><source srcset=\[
 \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
 \frac{v_1+v_2}{2} \,ds
-\] +\]" src="form_1606.png"/>

    Definition at line 357 of file divergence.h.

    @@ -587,12 +587,12 @@ double factor = 1.&#href_anchor"memdoc">

    The jump of the normal component

    -\[
+<picture><source srcset=\[
 \int_F
  (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
  (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
 \,ds
-\] +\]" src="form_1607.png"/>

    Definition at line 416 of file divergence.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-12-27 18:25:16.808928699 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 2024-12-27 18:25:16.816928754 +0000 @@ -167,7 +167,7 @@

    The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

    -\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \] +\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

    Definition at line 50 of file elasticity.h.

    @@ -210,7 +210,7 @@

    Vector-valued residual operator for linear elasticity in weak form

    -\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \] +\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

    Definition at line 83 of file elasticity.h.

    @@ -252,10 +252,10 @@

    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n\Bigr)\;ds.
-\] +\]" src="form_1610.png"/>

    Definition at line 122 of file elasticity.h.

    @@ -297,10 +297,10 @@

    The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
 u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1611.png"/>

    Definition at line 177 of file elasticity.h.

    @@ -350,12 +350,12 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1612.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 256 of file elasticity.h.

    @@ -411,10 +411,10 @@

    The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
 - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
-\] +\]" src="form_1614.png"/>

    Definition at line 308 of file elasticity.h.

    @@ -459,12 +459,12 @@ double factor = 1.&#href_anchor"memdoc">

    Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n^T\Bigr)\;ds.
-\] +\]" src="form_1615.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 386 of file elasticity.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-12-27 18:25:16.840928918 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-12-27 18:25:16.848928973 +0000 @@ -150,9 +150,9 @@ double factor = 1.&#href_anchor"memdoc">

    The weak form of the grad-div operator penalizing volume changes

    -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1616.png"/>

    Definition at line 51 of file grad_div.h.

    @@ -187,9 +187,9 @@ const double factor = 1.&#href_anchor"memdoc">

    The weak form of the grad-div residual

    -\[
+<picture><source srcset=\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
-\] +\]" src="form_1616.png"/>

    Definition at line 85 of file grad_div.h.

    @@ -231,10 +231,10 @@

    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u \cdot n)(v \cdot n)  - \nabla\cdot u
 v\cdot n - u \cdot n \nabla \cdot v \Bigr)\;ds.
-\] +\]" src="form_1617.png"/>

    Definition at line 121 of file grad_div.h.

    @@ -284,14 +284,14 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (\mathbf u \cdot \mathbf n- \mathbf g \cdot
 \mathbf n) (\mathbf v \cdot \mathbf n)
 - \nabla \cdot \mathbf u (\mathbf v \cdot \mathbf n)
 - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds.
-\] +\]" src="form_1618.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 173 of file grad_div.h.

    @@ -418,12 +418,12 @@ double ext_factor = -1.&#href_anchor"memdoc">

    Grad-div residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [\mathbf u \cdot\mathbf n]
 \cdot[\mathbf v \cdot \mathbf n]
 - \{\nabla \cdot \mathbf u\}[\mathbf v\cdot \mathbf n]
 - [\mathbf u\times \mathbf n]\{\nabla\cdot \mathbf v\} \Bigr) \; ds.
-\] +\]" src="form_1619.png"/>

    See for instance Hansbo and Larson, 2002

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-12-27 18:25:16.872929138 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-12-27 18:25:16.876929166 +0000 @@ -249,7 +249,7 @@ - +
    resultThe vector obtained as result.
    feThe FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
    inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    inputThe representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    factorA constant that multiplies the result.
    @@ -351,7 +351,7 @@ const double factor2 = 1.&#href_anchor"memdoc"> -

    The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    +

    The jump matrix between two cells for scalar or vector values finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    \[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
 \int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \]

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-12-27 18:25:16.912929413 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-12-27 18:25:16.916929440 +0000 @@ -165,8 +165,8 @@ const double factor = 1.&#href_anchor"memdoc">

    Laplacian in weak form, namely on the cell Z the matrix

    -\[
-\int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[
+\int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.

    @@ -210,7 +210,7 @@

    Laplacian residual operator in weak form

    -\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \] +\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    Definition at line 91 of file laplace.h.

    @@ -253,7 +253,7 @@

    Vector-valued Laplacian residual operator in weak form

    -\[ \int_Z \nu \nabla u : \nabla v \, dx. \] +\[ \int_Z \nu \nabla u : \nabla v \, dx. \]

    Definition at line 118 of file laplace.h.

    @@ -288,11 +288,11 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
-\] +\]" src="form_1632.png"/>

    -

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    +

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 156 of file laplace.h.

    @@ -326,12 +326,12 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
 \partial_n v_\tau\Bigr)\;ds.
-\] +\]" src="form_1633.png"/>

    -

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    +

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 197 of file laplace.h.

    @@ -380,12 +380,12 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
 v\Bigr)\;ds.
-\] +\]" src="form_1634.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 260 of file laplace.h.

    @@ -434,13 +434,13 @@ double factor = 1.&#href_anchor"memdoc">

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

    -\[
+<picture><source srcset=\[
 \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
 - \partial_n \mathbf u \cdot \mathbf v
 - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
-\] +\]" src="form_1635.png"/>

    -

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 307 of file laplace.h.

    @@ -499,10 +499,10 @@ double factor2 = -1.&#href_anchor"memdoc">

    Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1636.png"/>

    The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

    If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.

    @@ -564,10 +564,10 @@ double factor2 = -1.&#href_anchor"memdoc">

    Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf
 n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
-\] +\]" src="form_1637.png"/>

    Warning
    This function is still under development!
    @@ -638,10 +638,10 @@ double ext_factor = -1.&#href_anchor"memdoc">

    Residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
-\] +\]" src="form_1636.png"/>

    Definition at line 543 of file laplace.h.

    @@ -711,11 +711,11 @@ double ext_factor = -1.&#href_anchor"memdoc">

    Vector-valued residual term for the symmetric interior penalty method:

    -\[
+<picture><source srcset=\[
 \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
 - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
 - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
-\] +\]" src="form_1638.png"/>

    Definition at line 610 of file laplace.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-12-27 18:25:16.944929632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-12-27 18:25:16.952929687 +0000 @@ -133,22 +133,22 @@

    Local integrators related to curl operators and their traces.

    We use the following conventions for curl operators. First, in three space dimensions

    -\[
+<picture><source srcset=\[
 \nabla\times \mathbf u = \begin{pmatrix}
   \partial_2 u_3 - \partial_3 u_2 \\
   \partial_3 u_1 - \partial_1 u_3 \\
   \partial_1 u_2 - \partial_2 u_1
 \end{pmatrix}.
-\] +\]" src="form_1641.png"/>

    -

    In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

    -\[
+<p>In two space dimensions, the curl is obtained by extending a vector <b>u</b> to <picture><source srcset=$(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

    +\[
  \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
  \qquad
  \nabla \times p = \begin{pmatrix}
    \partial_2 p \\ -\partial_1 p
  \end{pmatrix}
-\] +\]" src="form_1644.png"/>

    Function Documentation

    @@ -174,15 +174,15 @@ const Tensor< 2, dim > & h2&#href_anchor"memdoc">

    Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

    -\[
+<picture><source srcset=\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
 \partial_1\partial_2 u_1 - \partial_1^2 u_2
 \end{pmatrix}
-\] +\]" src="form_1645.png"/>

    and

    -\[
+<picture><source srcset=\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 + \partial_1\partial_3 u_3
 - (\partial_2^2+\partial_3^2) u_1 \\
@@ -191,7 +191,7 @@
 \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
 - (\partial_1^2+\partial_2^2) u_3
 \end{pmatrix}.
-\] +\]" src="form_1646.png"/>

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
    @@ -227,9 +227,9 @@ const Tensor< 1, dim > & normal&#href_anchor"memdoc">

    Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

    -\[
+<picture><source srcset=\[
 \mathbf n \times \nabla \times u.
-\] +\]" src="form_1647.png"/>

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
    @@ -260,10 +260,10 @@ const double factor = 1.&#href_anchor"memdoc">

    The curl-curl operator

    -\[
+<picture><source srcset=\[
 \int_Z \nabla\times u \cdot
 \nabla \times v \,dx
-\] +\]" src="form_1648.png"/>

    in weak form.

    @@ -299,9 +299,9 @@ double factor = 1.&#href_anchor"memdoc">

    The matrix for the curl operator

    -\[
+<picture><source srcset=\[
 \int_Z \nabla \times u \cdot v \,dx.
-\] +\]" src="form_1649.png"/>

    This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.

    @@ -343,14 +343,14 @@ double factor = 1.&#href_anchor"memdoc">

    The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

    -\[
+<picture><source srcset=\[
 \int_F \biggl( 2\gamma
 (u\times n) (v\times n) -
 (u\times n)(\nu \nabla\times
 v) - (v\times
 n)(\nu \nabla\times u)
 \biggr)
-\] +\]" src="form_1650.png"/>

    Definition at line 266 of file maxwell.h.

    @@ -380,10 +380,10 @@ double factor = 1.&#href_anchor"memdoc">

    The product of two tangential traces,

    -\[
+<picture><source srcset=\[
 \int_F (u\times n)(v\times n)
 \, ds.
-\] +\]" src="form_1651.png"/>

    Definition at line 329 of file maxwell.h.

    @@ -451,14 +451,14 @@

    The interior penalty fluxes for Maxwell systems.

    -\[
+<picture><source srcset=\[
 \int_F \biggl( \gamma
 \{u\times n\}\{v\times n\} -
 \{u\times n\}\{\nu \nabla\times
 v\}- \{v\times
 n\}\{\nu \nabla\times u\}
 \biggr)\;dx
-\] +\]" src="form_1652.png"/>

    Definition at line 386 of file maxwell.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-12-27 18:25:16.988929935 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-12-27 18:25:16.992929962 +0000 @@ -175,8 +175,8 @@
    -

    Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

    -

    inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    +

    Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

    +

    inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.

    @@ -242,7 +242,7 @@
    Enumerator
    inside 
    const AffineConstraints< number > & immersed_constraints = AffineConstraints<number>()&#href_anchor"memdoc">

    Create a coupling sparsity pattern for non-matching, overlapping grids.

    -

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
 \text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

    \[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
@@ -250,9 +250,9 @@
 \]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    -

    The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    +

    The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero than the other, then the excess components will be ignored.

    -

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    +

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sens for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.

    @@ -370,7 +370,7 @@
    const AffineConstraints< typename Matrix::value_type > & immersed_constraints = AffineConstraints<typename&#href_anchor"memdoc">

    Create a coupling mass matrix for non-matching, overlapping grids.

    -

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
+<p>Given two non-matching triangulations, representing the domains <picture><source srcset=$\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) =
 \text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

    \[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
@@ -378,9 +378,9 @@
 \]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    -

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

    +

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries non-zero than the other, then the excess components will be ignored.

    -

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    +

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 lower or equal than dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.

    @@ -513,7 +513,7 @@ \quad i \in [0,n), \alpha \in [0,m), \]" src="form_2108.png"/>

    -

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    +

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triagnulations are of type parallel::distributed::Triangulation<dim1,spacedim>.

    @@ -596,7 +596,7 @@ \quad i \in [0,n), \alpha \in [0,m), \]" src="form_2108.png"/>

    -

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    +

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-12-27 18:25:17.028930209 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-12-27 18:25:17.032930237 +0000 @@ -292,7 +292,7 @@
    -

    Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    +

    Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    Definition at line 202 of file quadrature_generator.cc.

    @@ -314,20 +314,20 @@
    -

    Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

    -

    Let $J_I$ be the index set of the indefinite functions:

    -

    $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

    -

    This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

    -

    $|\partial_k \psi_j| > L_{jk}$.

    -

    and then returns a coordinate direction, $i$, and a lower bound $L$, such that

    +

    Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

    +

    Let $J_I$ be the index set of the indefinite functions:

    +

    $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

    +

    This function converts the incoming bounds to a lower bound, $L_{ij}$, on the absolute value of each component of the gradient:

    +

    $|\partial_k \psi_j| > L_{jk}$.

    +

    and then returns a coordinate direction, $i$, and a lower bound $L$, such that

    -\[
+<picture><source srcset=\[
 i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
 L =      \max_{k} \min_{j \in J_I} L_{jk}.
-\] +\]" src="form_2222.png"/>

    -

    This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

    -

    $|\partial_i \psi_j| > L$.

    +

    This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

    +

    $|\partial_i \psi_j| > L$.

    Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

    Definition at line 276 of file quadrature_generator.cc.

    @@ -399,7 +399,7 @@ std::pair< double, double > & value_bounds&#href_anchor"memdoc"> -

    Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    +

    Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    $[\min(L, L_f), \max(U, U_f)]$,

    where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)|$, and $x_v$ is a vertex.

    It is assumed that the incoming function is scalar valued.

    @@ -487,7 +487,7 @@
    -

    Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    +

    Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    $L_a \leq |f(x)|$,

    by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

    By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.

    @@ -676,7 +676,7 @@ QPartitioning< dim > & q_partitioning&#href_anchor"memdoc"> -

    Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

    +

    Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute point according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

    \[
 X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
 W_q = w_I (y_{i+1} - y_i) w_q,
@@ -761,7 +761,7 @@
           <td></td>
           <td class=const std::optional< HeightDirectionData > & height_direction_data&#href_anchor"memdoc">

    Return the coordinate direction that the box should be split in, assuming that the box should be split it half.

    -

    If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

    +

    If the box is larger in one coordante direction, this direction is returned. If the box have the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has a least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data non-set.

    Definition at line 1018 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-12-27 18:25:17.068930484 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-12-27 18:25:17.076930539 +0000 @@ -432,7 +432,7 @@
    -

    Perform the intersection of the given topological shape with the plane $c_x x + c_y y + c_z z +c = 0$. The returned topological shape will contain as few bsplines as possible. An exception is thrown if the intersection produces an empty shape.

    +

    Perform the intersection of the given topological shape with the plane $c_x x + c_y y + c_z z +c = 0$. The returned topological shape will contain as few bsplines as possible. An exception is thrown if the intersection produces an empty shape.

    Definition at line 427 of file utilities.cc.

    @@ -570,7 +570,7 @@ const Mapping< 2, spacedim > & mapping = StaticMappingQ1<2,&#href_anchor"memdoc">

    Given a Triangulation and an optional Mapping, create a vector of smooth curves that interpolate the connected parts of the boundary vertices of the Triangulation and return them as a vector of TopoDS_Edge objects.

    -

    This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ Continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

    +

    This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ Continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

    The returned curves are ordered with respect to the indices of the faces that make up the triangulation boundary, i.e., the first curve is the one extracted starting from the face with the lowest index, and so on.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-12-27 18:25:17.096930676 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-12-27 18:25:17.100930704 +0000 @@ -154,21 +154,21 @@
    const ComponentMask & space_comps = {}&#href_anchor"memdoc">

    Create an interpolation sparsity pattern for particles.

    -

    Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

    -\[
+<p>Given a triangulation representing the domain <picture><source srcset=$\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

    +\[
 M_{i,j} \dealcoloneq v_j(x_i) ,
-\] +\]" src="form_2514.png"/>

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

    -\[
+<picture><source srcset=\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
-\] +\]" src="form_2515.png"/>

    -

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    -

    The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    -

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    +

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    +

    The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    +

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

    Definition at line 31 of file utilities.cc.

    @@ -205,21 +205,21 @@
    const ComponentMask & space_comps = {}&#href_anchor"memdoc">

    Create an interpolation matrix for particles.

    -

    Given a triangulation representing the domains $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

    -\[
+<p>Given a triangulation representing the domains <picture><source srcset=$\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

    +\[
 M_{ij} \dealcoloneq v_j(x_i) ,
-\] +\]" src="form_2517.png"/>

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

    -\[
+<picture><source srcset=\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
-\] +\]" src="form_2515.png"/>

    -

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    -

    The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    -

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    +

    where comp_j is the only non zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    +

    The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    +

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

    Definition at line 113 of file utilities.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-12-27 18:25:17.144931006 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-12-27 18:25:17.152931061 +0000 @@ -196,7 +196,7 @@ \end{array} \right] , \]" src="form_2555.png"/>

    -

    where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

    +

    where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

    \[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html	2024-12-27 18:25:17.176931225 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html	2024-12-27 18:25:17.184931280 +0000
@@ -136,7 +136,7 @@
 <tr class=

    &#href_anchor"details" id="details">

    Detailed Description

    A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

    Notation

    -

    We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    +

    We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor, they just indicate the kind of object a particular tensor is.

    Note
    For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

    Function Documentation

    @@ -205,8 +205,8 @@

    Parameters
    - - + +
    [in]VThe vector to be transformed $\mathbf{V}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]VThe vector to be transformed $\mathbf{V}$
    [in]BThe transformation matrix $\mathbf{B}$
    @@ -240,7 +240,7 @@
    Parameters
    - +
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]BThe transformation matrix $\mathbf{B}$
    @@ -274,7 +274,7 @@
    Parameters
    - +
    [in]TThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]BThe transformation matrix $\mathbf{B}$
    @@ -307,7 +307,7 @@
    Parameters
    - +
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]BThe transformation matrix $\mathbf{B}$
    @@ -340,7 +340,7 @@
    Parameters
    - +
    [in]HThe tensor to be transformed $\mathbf{T}$
    [in]BThe transformation matrix $\mathbf{B}$
    [in]BThe transformation matrix $\mathbf{B}$
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-12-27 18:25:17.204931418 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-12-27 18:25:17.212931473 +0000 @@ -484,11 +484,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi^{-1}\left(\bullet\right)^{\sharp} \right]_{IJKL}
    \dealcoloneq F^{-1}_{Ii} F^{-1}_{Jj}
    \left(\bullet\right)^{\sharp}_{ijkl} F^{-1}_{Kk} F^{-1}_{Ll}
-\] +\]" src="form_2619.png"/>

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-12-27 18:25:17.240931665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-12-27 18:25:17.240931665 +0000 @@ -162,10 +162,10 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a covariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2620.png"/>

    Parameters
    @@ -197,11 +197,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2621.png"/>

    Parameters
    @@ -233,11 +233,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
-\] +\]" src="form_2621.png"/>

    Parameters
    @@ -269,11 +269,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2622.png"/>

    Parameters
    @@ -305,11 +305,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
-\] +\]" src="form_2622.png"/>

    Parameters
    @@ -341,10 +341,10 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a covariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
-\] +\]" src="form_2623.png"/>

    Parameters
    @@ -376,11 +376,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
 \mathbf{F}
-\] +\]" src="form_2624.png"/>

    Parameters
    @@ -412,11 +412,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-2 covariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}
-\] +\]" src="form_2625.png"/>

    Parameters
    @@ -448,11 +448,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi^{-1}\left(\bullet\right)^{\flat} \right]_{IJKL}
  \dealcoloneq F^{T}_{Ii} F^{T}_{Jj}
  \left(\bullet\right)^{\flat}_{ijkl} F^{T}_{Kk} F^{T}_{Ll}
-\] +\]" src="form_2626.png"/>

    Parameters
    @@ -484,11 +484,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \left[ \chi^{-1}\left(\bullet\right)^{\flat} \right]_{IJKL}
  \dealcoloneq F^{T}_{Ii} F^{T}_{Jj}
  \left(\bullet\right)^{\flat}_{ijkl} F^{T}_{Kk} F^{T}_{Ll}
-\] +\]" src="form_2626.png"/>

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-12-27 18:25:17.264931830 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-12-27 18:25:17.264931830 +0000 @@ -153,11 +153,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
  \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2627.png"/>

    Parameters
    @@ -167,8 +167,8 @@
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{V} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{V} \right)$
    @@ -190,11 +190,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2629.png"/>

    Parameters
    @@ -204,8 +204,8 @@
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
    @@ -227,11 +227,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
-\] +\]" src="form_2629.png"/>

    Parameters
    @@ -241,8 +241,8 @@
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{T} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{T} \right)$
    @@ -264,12 +264,12 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2631.png"/>

    Parameters
    @@ -279,8 +279,8 @@
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
    @@ -302,12 +302,12 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
-\] +\]" src="form_2631.png"/>

    Parameters
    @@ -317,8 +317,8 @@
    -
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
-\mathbf{H} \right)$
    +
    Returns
    $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left(
+\mathbf{H} \right)$
    @@ -340,11 +340,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a contravariant vector, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp}
-\] +\]" src="form_2633.png"/>

    Parameters
    @@ -354,8 +354,8 @@
    -
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
-\right)$
    +
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v}
+\right)$
    @@ -377,11 +377,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2635.png"/>

    Parameters
    @@ -391,8 +391,8 @@
    -
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
-\right)$
    +
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
+\right)$
    @@ -414,11 +414,11 @@
    const Tensor< 2, dim, Number > & F&#href_anchor"memdoc">

    Return the result of the pull back transformation on a rank-2 contravariant symmetric tensor, i.e.

    -\[
+<picture><source srcset=\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
-\] +\]" src="form_2635.png"/>

    Parameters
    @@ -428,8 +428,8 @@
    -
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
-\right)$
    +
    Returns
    $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t}
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html	2024-12-27 18:25:17.292932022 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html	2024-12-27 18:25:17.292932022 +0000
@@ -136,11 +136,11 @@
           <td class=
    const Tensor< 1, spacedim, Number > & b&#href_anchor"memdoc"> -

    Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    +

    Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    This function uses the geometric definition of the scalar product.

    -\[
+<picture><source srcset=\[
   \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
-\] +\]" src="form_2640.png"/>

    @@ -168,20 +168,20 @@
    const Tensor< 1, spacedim, Number > & axis&#href_anchor"memdoc">

    Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

    -

    The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    +

    The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

    This function uses the geometric definitions of both the scalar and cross product.

    -\begin{align*}
+<picture><source srcset=\begin{align*}
   \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
   \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
-\end{align*} +\end{align*}" src="form_2642.png"/>

    We can create the tangent of the angle using both products.

    -\[
+<picture><source srcset=\[
   \tan{\theta}
   = \frac{\sin(\theta)}{\cos(theta)}
   = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
-\] +\]" src="form_2643.png"/>

    Note
    Only applicable for three-dimensional vectors spacedim == 3.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-12-27 18:25:17.316932187 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-12-27 18:25:17.320932214 +0000 @@ -123,13 +123,13 @@
    &#href_anchor"memitem:">class  TransformationSpectrumFolding
    &#href_anchor"details" id="details">

    Detailed Description

    Base namespace for solver classes using the SLEPc solvers which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the right flags to set the right solver.

    -

    The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

    +

    The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

    SLEPcWrappers can be implemented in application codes in the following way:

    SolverControl solver_control (1000, 1e-9);
    SolverArnoldi system (solver_control, mpi_communicator);
    system.solve (A, B, lambda, x, size_of_spectrum);
    -

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    +

    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and the lower eigenvalues are wanted only, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    system.set_which_eigenpairs (EPS_SMALLEST_REAL);

    These options can also be set at the command line.

    See also step-36 for a hands-on example.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-12-27 18:25:17.340932352 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-12-27 18:25:17.344932379 +0000 @@ -192,7 +192,7 @@
    const VectorType &b,
    double tol)>

    Type of function objects to interface with SUNDIALS' linear solvers

    -

    This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

    +

    This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-12-27 18:25:17.372932571 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-12-27 18:25:17.376932599 +0000 @@ -122,19 +122,19 @@
    [in]opA LinearOperator that applies the matrix vector product

    Detailed Description

    Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

    -

    From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

    -\begin{eqnarray*}
+<p>From the definition, we can write our <a class=Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

    +\begin{eqnarray*}
    u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
    u_{h, {\bf k}}({\bf x}) &=&
      \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
      \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2315.png"/>

    -

    with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
-{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
-F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

    -

    If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

    -\begin{eqnarray*}
+<p> with <picture><source srcset=$u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot
+{\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal
+F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

    +

    If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

    +\begin{eqnarray*}
   \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
     \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
     \infty \\
@@ -143,40 +143,40 @@
     a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
     (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
     \|{\bf k}\|_2^{2s} < \infty
-\end{eqnarray*} +\end{eqnarray*}" src="form_2319.png"/>

    The sum is finite only if the summands decay at least with order

    -\[
+<picture><source srcset=\[
   |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
     {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
-\] +\]" src="form_2320.png"/>

    -

    for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
-d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

    -\[
+<p> for all <picture><source srcset=$\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 +
+d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

    +\[
   |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^
     {-\left(s + \frac d2 + \epsilon \right)} \right)
-\] +\]" src="form_2325.png"/>

    -

    The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

    -\[
+<p>The next step is to estimate how fast these coefficients decay with <picture><source srcset=$\|{\bf k}\|_2$. Thus, we perform a least-squares fit

    +\[
    \min_{\alpha,\sigma}
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
-\] +\]" src="form_2327.png"/>

    -

    with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

    -\[
+<p> with regression coefficients <picture><source srcset=$\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

    +\[
    \min_{\beta,\sigma}
    Q(\beta,\sigma) =
    \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
    \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2
 \right)^2,
-\] +\]" src="form_2328.png"/>

    -

    where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
-\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

    -\[
+<p> where <picture><source srcset=$\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0,
+\frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

    +\[
    \left(\begin{array}{cc}
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
    \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2
@@ -193,10 +193,10 @@
    \\
    \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}| \ln \|{\bf
 k}\|_2 \end{array}\right)
-\] +\]" src="form_2332.png"/>

    -

    Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

    -

    While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    +

    Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

    +

    While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    Note
    An extensive demonstration of the use of these functions is provided in step-27.

    Function Documentation

    @@ -241,17 +241,17 @@
    const bool only_flagged_cells = false&#href_anchor"memdoc"> -

    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

    -\[
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

    +\[
   \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
     C - \sigma \ln \|{\bf k}\|_2
-\] +\]" src="form_2346.png"/>

    -

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

    -

    The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

    -

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    +

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

    +

    The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

    +

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    -

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    +

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 368 of file smoothness_estimator.cc.

    @@ -300,11 +300,11 @@
    const bool only_flagged_cells = false&#href_anchor"memdoc"> -

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    -

    The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

    -

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    +

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    +

    The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

    +

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    -

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    +

    The parameter smallest_abs_coefficient allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 466 of file smoothness_estimator.cc.

    @@ -332,7 +332,7 @@

    Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

    -

    For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

    +

    For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further for each element, we use a 5-point Gaussian quarature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

    As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

    Definition at line 575 of file smoothness_estimator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-12-27 18:25:17.404932791 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-12-27 18:25:17.404932791 +0000 @@ -122,25 +122,25 @@

    Detailed Description

    Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

    -

    In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

    -\begin{eqnarray*}
+<p>In one dimension, the finite element solution on cell <picture><source srcset=$K$ with polynomial degree $p$ can be written as

    +\begin{eqnarray*}
    u_h(x) &=& \sum_j u_j \varphi_j (x) \\
    u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
    \quad a_k = \sum_j {\cal L}_{k,j} u_j
-\end{eqnarray*} +\end{eqnarray*}" src="form_2305.png"/>

    -

    where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

    +

    where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

    A function is analytic, i.e., representable by a power series, if and only if their Legendre expansion coefficients decay as (see [eibner2007hp])

    -\[
+<picture><source srcset=\[
   |a_k| \sim c \, \exp(-\sigma k)
-\] +\]" src="form_2309.png"/>

    -

    We determine their decay rate $\sigma$ by performing the linear regression fit of

    -\[
+<p> We determine their decay rate <picture><source srcset=$\sigma$ by performing the linear regression fit of

    +\[
   \ln |a_k| \sim C - \sigma k
-\] +\]" src="form_2311.png"/>

    -

    for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    +

    for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    Function Documentation

    ◆ coefficient_decay()

    @@ -184,24 +184,24 @@
    const bool only_flagged_cells = false&#href_anchor"memdoc"> -

    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

    -\begin{eqnarray*}
+<p>In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors <picture><source srcset=$\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

    +\begin{eqnarray*}
   \widetilde P_{\bf k}({\bf x}) &=&
     \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
   \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
     C - \sigma \|{\bf k}\|_1
-\end{eqnarray*} +\end{eqnarray*}" src="form_2338.png"/>

    -

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    +

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
    - + - - + +
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
    [in]dof_handlerA DoFHandler.
    [in]solutionA solution vector.
    [out]smoothness_indicatorsA vector for smoothness indicators.
    [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
    [in]regression_strategyDetermines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
    [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
    @@ -254,16 +254,16 @@
    const bool only_flagged_cells = false&#href_anchor"memdoc"> -

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    +

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
    - + - - + +
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
    [in]fe_legendreFESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
    [in]dof_handlerA DoFHandler
    [in]solutionA solution vector
    [out]smoothness_indicatorsA vector for smoothness indicators
    [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
    [in]coefficients_predicateA predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
    [in]smallest_abs_coefficientThe smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameters allows to ignore small (in absolute value) coefficients within the linear regression fit. In case there are less than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
    [in]only_flagged_cellsSmoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to NaN.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-12-27 18:25:17.424932928 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-12-27 18:25:17.428932956 +0000 @@ -152,18 +152,18 @@
    SparsityPatternType2 & sparsity_pattern_out&#href_anchor"memdoc">

    Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

    -\[
+<picture><source srcset=\[
  A_i = R_i A R_i^T,
-\] +\]" src="form_2014.png"/>

    -

    where the Boolean matrix $R_i$ is defined by the entries of requested_is.

    -

    The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

    +

    where the Boolean matrix $R_i$ is defined by the entries of requested_is.

    +

    The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

    Such a function is useful to implement Schwarz methods, where operations of type

    -\[
+<picture><source srcset=\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
-\] +\]" src="form_2016.png"/>

    -

    are performed to iteratively solve a system of type $Au=f$.

    +

    are performed to iteratively solve a system of type $Au=f$.

    Warning
    This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-12-27 18:25:17.456933148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-12-27 18:25:17.456933148 +0000 @@ -191,7 +191,7 @@
    Note
    This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
    Template Parameters
    - +
    indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
    indexThe index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
    rankRank of the tensorial object t
    TA tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.
    @@ -275,12 +275,12 @@

    This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

    -\[
+<picture><source srcset=\[
   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
-\] +\]" src="form_900.png"/>

    Calling this function is equivalent of writing the following low level code:

    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    ...
    @@ -335,12 +335,12 @@

    Full contraction of three tensorial objects:

    -\[
+<picture><source srcset=\[
   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{left}_{i_1,..,i_{r1}}
   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{right}_{j_1,..,j_{r2}}
-\] +\]" src="form_901.png"/>

    Calling this function is equivalent of writing the following low level code:

    T1 result = T1();
    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-12-27 18:25:17.508933505 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-12-27 18:25:17.512933533 +0000 @@ -1034,13 +1034,13 @@

    Calculate a fixed power, provided as a template argument, of a number.

    -

    This function provides an efficient way to calculate things like $t^N$ where N is a known number at compile time. The function computes the power of $t$ via the "recursive doubling" approach in which, for example, $t^7$ is computed as

    +

    This function provides an efficient way to calculate things like $t^N$ where N is a known number at compile time. The function computes the power of $t$ via the "recursive doubling" approach in which, for example, $t^7$ is computed as

    \begin{align*}
   t^7 = (tttt)(tt)(t)
 \end{align*}

    where computing $tt$ requires one product, computing $tttt$ is achieved by multiplying the previously computed $tt$ by itself (requiring another multiplication), and then the product is computed via two more multiplications for a total of 4 multiplications instead of the naively necessary 6.

    -

    The major savings this function generates result, however, from the fact that it exploits that we have an integer power of the argument $t$. The alternative to computing such powers, std::pow(t,7) uses the std::pow function that takes the exponent as a floating point number and, because it has to cater to the complexities of the general situation, is vastly slower.

    +

    The major savings this function generates result, however, from the fact that it exploits that we have an integer power of the argument $t$. The alternative to computing such powers, std::pow(t,7) uses the std::pow function that takes the exponent as a floating point number and, because it has to cater to the complexities of the general situation, is vastly slower.

    Use this function as in fixed_power<dim> (t) or fixed_power<7> (t).

    Definition at line 942 of file utilities.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-12-27 18:25:17.544933752 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-12-27 18:25:17.548933780 +0000 @@ -147,7 +147,7 @@

    Return the elements of a continuous Givens rotation matrix and the norm of the input vector.

    -

    That is for a given pair x and y, return $c$ , $s$ and $\sqrt{x^2+y^2}$ such that

    +

    That is for a given pair x and y, return $c$ , $s$ and $\sqrt{x^2+y^2}$ such that

    \[
 \begin{bmatrix}
 c  & s \\
@@ -189,7 +189,7 @@
       </table>
 </div><div class=

    Return the elements of a hyperbolic rotation matrix.

    -

    That is for a given pair x and y, return $c$ , $s$ and $r$ such that

    +

    That is for a given pair x and y, return $c$ , $s$ and $r$ such that

    \[
 \begin{bmatrix}
 c  & -s \\
@@ -289,8 +289,8 @@
           <td class=

    VectorMemory< VectorType > & vector_memory&#href_anchor"memdoc"> -

    Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
-\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    +

    Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i
+\psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

    @@ -298,8 +298,8 @@
    -

    By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

    -

    The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    +

    By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can dump the corresponding modes in x. The higher the polynomial degree $n$, the more rapid it grows outside of the $[-1,1]$. In order to avoid numerical overflow, we normalize polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

    +

    The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    vector_memory is used to allocate memory for temporary objects.

    This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from [Zhou2014].

    Note
    If tau is equal to std::numeric_limits<double>::infinity(), no normalization will be performed.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-12-27 18:25:17.596934109 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-12-27 18:25:17.596934109 +0000 @@ -1313,7 +1313,7 @@
    const MPI_Comm comm&#href_anchor"memdoc"> -

    For each process $p$ on a communicator with $P$ processes, compute both the (exclusive) partial sum $\sum_{i=0}^{p-1} v_i$ and the total sum $\sum_{i=0}^{P-1} v_i$, and return these two values as a pair. The former is computed via the MPI_Exscan function where the partial sum is typically called "(exclusive) scan" of the values $v_p$ provided by the individual processes. The term "prefix sum" is also used.

    +

    For each process $p$ on a communicator with $P$ processes, compute both the (exclusive) partial sum $\sum_{i=0}^{p-1} v_i$ and the total sum $\sum_{i=0}^{P-1} v_i$, and return these two values as a pair. The former is computed via the MPI_Exscan function where the partial sum is typically called "(exclusive) scan" of the values $v_p$ provided by the individual processes. The term "prefix sum" is also used.

    This function is only available if T is a type natively supported by MPI.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-12-27 18:25:17.632934357 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-12-27 18:25:17.636934384 +0000 @@ -140,7 +140,7 @@
    std::vector< unsigned int >&#href_anchor"memTemplItemRight" valign="bottom">selector (const std::vector< unsigned int > &targets, const std::function< RequestType(const unsigned int)> &create_request, const std::function< void(const unsigned int, const RequestType &)> &process_request, const MPI_Comm comm)
    &#href_anchor"details" id="details">

    Detailed Description

    A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

    -

    The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    +

    The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

    The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there be any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)

    As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-12-27 18:25:17.756935208 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-12-27 18:25:17.760935235 +0000 @@ -343,7 +343,7 @@

    • -

      Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

      +

      Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

      In order to get proper results, it be may necessary to treat boundary conditions right. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

      The projection of the boundary values first, then eliminating them from the global system of equations is not needed usually. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

      Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.

      @@ -406,220 +406,220 @@
    -

    Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

    -

    In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

    -\[
+<p>Denote which norm/integral is to be computed by the <a class=integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

    +

    In the following,we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

    +\[
 \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
-\] +\]" src="form_2398.png"/>

    Similarly for suprema over a cell $T$:

    -\[
+<picture><source srcset=\[
 \sup_{x\in T} |f(x)| dx \approx \max_q |f(x_q)|.
-\] +\]" src="form_2399.png"/>

    - -
    Enumerator
    mean 

    The function or difference of functions is integrated on each cell $K$:

    -\[
+<picture><source srcset=\[
   E_K
 = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
 = \int_K \sum_c e_c \, w_c
-\] +\]" src="form_2400.png"/>

    and summed up to get

    -\[
+<picture><source srcset=\[
   E = \sum_K E_K
     = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
-\] +\]" src="form_2401.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E = \int_\Omega (\hat{f} - f)
     = \int_\Omega e.
-\] +\]" src="form_2403.png"/>

    -

    Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

    +

    Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

    L1_norm 

    The absolute value of the function is integrated:

    -\[
+<picture><source srcset=\[
   E_K = \int_K \sum_c |e_c| \, w_c
-\] +\]" src="form_2406.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
-\] +\]" src="form_2407.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E  = \| e \|_{L^1}.
-\] +\]" src="form_2408.png"/>

    L2_norm 

    The square of the function is integrated and the square root of the result is computed on each cell:

    -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2409.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega  \sum_c e_c^2 \, w_c }
-\] +\]" src="form_2410.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E = \sqrt{ \int_\Omega e^2 }
     = \| e \|_{L^2}
-\] +\]" src="form_2411.png"/>

    Lp_norm 

    The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

    -\[
+<tr><td class=Lp_norm 

    The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

    +\[
   E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
-\] +\]" src="form_2412.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \left( \sum_K E_K^p \right)^{1/p}
-\] +\]" src="form_2413.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E = \| e \|_{L^p}.
-\] +\]" src="form_2414.png"/>

    Linfty_norm 

    The maximum absolute value of the function:

    -\[
+<picture><source srcset=\[
   E_K = \sup_K \max_c |e_c| \, w_c
-\] +\]" src="form_2415.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \max_K E_K
 = \sup_\Omega \max_c |e_c| \, w_c
-\] +\]" src="form_2416.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E  = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
-\] +\]" src="form_2417.png"/>

    H1_seminorm 

    L2_norm of the gradient:

    -\[
+<picture><source srcset=\[
   E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
-\] +\]" src="form_2418.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c (\nabla e_c)^2 \,
 w_c }
-\] +\]" src="form_2419.png"/>

    -

    or, for $w \equiv 1$:

    -\[
+<p> or, for <picture><source srcset=$w \equiv 1$:

    +\[
   E = \| \nabla e \|_{L^2}.
-\] +\]" src="form_2420.png"/>

    Hdiv_seminorm 

    L2_norm of the divergence of a vector field. The function $f$ is expected to have $c \geq \text{dim}$ components and the first dim will be used to compute the divergence:

    -\[
+<tr><td class=Hdiv_seminorm 

    L2_norm of the divergence of a vector field. The function $f$ is expected to have $c \geq \text{dim}$ components and the first dim will be used to compute the divergence:

    +\[
   E_K = \sqrt{ \int_K \left( \sum_c \frac{\partial e_c}{\partial x_c} \,
 \sqrt{w_c} \right)^2 }
-\] +\]" src="form_2422.png"/>

    and

    -\[
+<picture><source srcset=\[
   E = \sqrt{\sum_K E_K^2}
     = \sqrt{ \int_\Omega \left( \sum_c \frac{\partial e_c}{\partial x_c}
 \, \sqrt{w_c} \right)^2  }
-\] +\]" src="form_2423.png"/>

    -

    or, for $w \equiv 1$:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-12-27 18:25:17.800935510 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-12-27 18:25:17.808935565 +0000 @@ -541,7 +541,7 @@

    -

    Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

    +

    Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

    For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p.

    For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

    During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned to their least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

    @@ -565,7 +565,7 @@ \gamma_\text{p}^{(p_{K,\text{future}} - p_{K})}$" src="form_1575.png"/>

    On basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to justify whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

    -

    We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

    +

    We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predict errors is preserved on both meshes.

    In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function can not know where the singularity will be, and consequently assumes equal distribution.

    Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-12-27 18:25:17.936936444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-12-27 18:25:17.940936471 +0000 @@ -949,8 +949,8 @@
    const double coordinate_value&#href_anchor"memdoc">

    Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

    -

    For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

    -

    The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

    +

    For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

    +

    The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point are not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

    Definition at line 23 of file function_restriction.cc.

    @@ -2594,7 +2594,7 @@
    -

    Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
+<p>Compute the polynomial interpolation of a tensor product shape function <picture><source srcset=$\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) =
 \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field begin a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-12-27 18:25:17.976936719 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-12-27 18:25:17.984936774 +0000 @@ -396,7 +396,7 @@
    const unsigned int grainsize&#href_anchor"memdoc">

    This function works a lot like the apply_to_subranges() function, but it allows to accumulate numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified, and results are added up (i.e., the reduction of results from subranges happens by adding up these results).

    -

    An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

    void matrix_norm (const FullMatrix &A,
    +

    An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

    void matrix_norm (const FullMatrix &A,
    const Vector &x)
    {
    return
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-12-27 18:25:18.012936966 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-12-27 18:25:18.016936993 +0000 @@ -353,7 +353,7 @@
    if (cell->center()[1] > 0)
    cell->set_refine_flag ();
    IteratorRange< active_cell_iterator > active_cell_iterators() const
    -

    This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

    +

    This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.

    In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

    A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation topic.

    Different geometries

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-12-27 18:25:18.060937295 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-12-27 18:25:18.060937295 +0000 @@ -123,11 +123,11 @@
  • The plain program
  • Introduction

    -

    This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

    +

    This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

    For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

    Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

    -

    The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

    -

    The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

    +

    The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

    +

    The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

    Note
    This tutorial shows in essence how to choose a particular mapping for integrals, by attaching a particular geometry to the triangulation (as had already been done in step-1, for example) and then passing a mapping argument to the FEValues class that is used for all integrals in deal.II. The geometry we choose is a circle, for which deal.II already has a class (SphericalManifold) that can be used. If you want to define your own geometry, for example because it is complicated and cannot be described by the classes already available in deal.II, you will want to read through step-53.

    The commented program

    The first of the following include files are probably well-known by now and need no further explanation.

    @@ -168,7 +168,7 @@
     
    void hyper_ball(Triangulation< dim > &tria, const Point< dim > &center=Point< dim >(), const double radius=1., const bool attach_spherical_manifold_on_boundary_cells=false)
    -

    Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

    +

    Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

      for (unsigned int refinement = 0; refinement < 2; ++refinement)
      {
      std::cout << "Refinement level: " << refinement << std::endl;
    @@ -205,9 +205,9 @@
      }
      }
     
    -

    Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1
+</div><!-- fragment --><p>Now we proceed with the main part of the code, the approximation of <picture><source srcset=$\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1
    \ \textrm{det}\ J(\hat x) d\hat x \approx \sum_i \textrm{det}
-   \ J(\hat x_i)w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

    + \ J(\hat x_i)w(\hat x_i)$" src="form_2879.png"/>, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

      template <int dim>
      void compute_pi_by_area()
      {
    @@ -243,7 +243,7 @@
     
    @ update_JxW_values
    Transformed quadrature weights.
    -

    We employ an object of the ConvergenceTable class to store all important data like the approximated values for $\pi$ and the error with respect to the true value of $\pi$. We will also use functions provided by the ConvergenceTable class to compute convergence rates of the approximations to $\pi$.

    +

    We employ an object of the ConvergenceTable class to store all important data like the approximated values for $\pi$ and the error with respect to the true value of $\pi$. We will also use functions provided by the ConvergenceTable class to compute convergence rates of the approximations to $\pi$.

      ConvergenceTable table;
     
    @@ -291,7 +291,7 @@
      }
     
     
    -

    The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

    +

    The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

      template <int dim>
      void compute_pi_by_perimeter()
      {
    @@ -414,11 +414,11 @@
    unset ytics
    plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

    or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

    -

    The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

    +

    The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

    Five-cell discretization of the disk.
    20-cell discretization of the disk (i.e., five cells
               refined once).
    Five-cell discretization of the disk with quadratic edges. The
               boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with quadratic edges.
    Five-cell discretization of the disk with cubic edges. The
-              boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with cubic edges.

    These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

    + boundary is nearly indistinguishable from the actual circle." style="pointer-events: none;" width="400" height="400" class="inline"/>
    20-cell discretization with cubic edges.

    These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

    Close-up of quadratic discretization. The distance between the
          quadratic interpolant and the actual circle is small.
    Close-up of cubic discretization. The distance between the
          cubic interpolant and the actual circle is very small.

    Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between approximated domain and true one is hardly visible already for the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

    @@ -510,14 +510,14 @@
    1280 3.1415926535897896 3.5527e-15 3.32
    5120 3.1415926535897940 8.8818e-16 2.00
    Note
    Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
    -

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    +

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)

    Possibilities for extensions

    -

    As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

    -

    The question is whether one can do anything about this. One thought is to use a higher-precision data type. For example, one could think of declaring both the area and perimeter variables in compute_pi_by_area() and compute_pi_by_perimeter() with data type long double. long double is a data type that is not well specified in the C++ standard but at least on Intel processors has around 19, instead of around 16, digits of accuracy. If we were to do that, we would get results that differ from the ones shown above. However, maybe counter-intuitively, they are not uniformly better. For example, when computing $\pi$ by the area, at the time of writing these sentences we get these values with double precision for degree 4:

    5 3.1415871927401144 5.4608e-06 -
    +

    As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

    +

    The question is whether one can do anything about this. One thought is to use a higher-precision data type. For example, one could think of declaring both the area and perimeter variables in compute_pi_by_area() and compute_pi_by_perimeter() with data type long double. long double is a data type that is not well specified in the C++ standard but at least on Intel processors has around 19, instead of around 16, digits of accuracy. If we were to do that, we would get results that differ from the ones shown above. However, maybe counter-intuitively, they are not uniformly better. For example, when computing $\pi$ by the area, at the time of writing these sentences we get these values with double precision for degree 4:

    5 3.1415871927401144 5.4608e-06 -
    20 3.1415926314742491 2.2116e-08 7.95
    80 3.1415926535026268 8.7166e-11 7.99
    320 3.1415926535894005 3.9257e-13 7.79
    @@ -530,7 +530,7 @@
    320 3.1415926535894516 3.4157e-13 8.00
    1280 3.1415926535897918 1.5339e-15 7.80
    5120 3.1415926535897927 5.2649e-16 1.54
    -

    Indeed, here we get results that are approximately 50 times as accurate. On the other hand, when computing $\pi$ by the perimeter, we get this with double precision:

    5 3.1415921029432572 5.5065e-07 -
    +

    Indeed, here we get results that are approximately 50 times as accurate. On the other hand, when computing $\pi$ by the perimeter, we get this with double precision:

    5 3.1415921029432572 5.5065e-07 -
    20 3.1415926513737582 2.2160e-09 7.96
    80 3.1415926535810699 8.7232e-12 7.99
    320 3.1415926535897576 3.5527e-14 7.94
    @@ -542,7 +542,7 @@
    320 3.1415926535897576 3.5705e-14 7.93
    1280 3.1415926535897918 1.3785e-15 4.70
    5120 3.1415926535897944 1.3798e-15 -0.00
    -

    Here, using double precision is more accurate by about a factor of two. (Of course, in all cases, we have computed $\pi$ with more accuracy than any engineer would ever want to know.)

    +

    Here, using double precision is more accurate by about a factor of two. (Of course, in all cases, we have computed $\pi$ with more accuracy than any engineer would ever want to know.)

    What explains this unpredictability? In general, round-off errors can be thought of as random, and add up in ways that are not worth thinking too much about; we should therefore always treat any accuracy beyond, say, thirteen digits as suspect. Thus, it is probably not worth spending too much time on wondering why we get different winners and losers in the data type exchange from double and long double. The accuracy of the results is also largely not determined by the precision of the data type in which we accumulate each cell's (or face's) contributions, but the accuracy of what deal.II gives us via FEValues::JxW() and FEFaceValues::JxW(), which always uses double precision and which we cannot directly affect.

    But there are cases where one can do something about the precision, and it is worth at least mentioning the name of the most well-known algorithm in this area. Specifically, what we are doing when we add contributions into the area and perimeter values is that we are adding together positive numbers as we do here. In general, the round-off errors associated with each of these numbers is random, and if we add up contributions of substantially different sizes, then we will likely be dominated by the error in the largest contributions. One can avoid this by adding up numbers sorted by their size, and this may then result in marginally more accurate end results. The algorithm that implements this is typically called Kahan's summation algorithm. While one could play with it in the current context, it is likely not going to improve the accuracy in ways that will truly matter.

    The plain program

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-12-27 18:25:18.104937598 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-12-27 18:25:18.108937625 +0000 @@ -134,7 +134,7 @@ \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0. \]" src="form_2889.png"/>

    -

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    +

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

    For this, there are various possibilities:

    1. @@ -336,7 +336,7 @@

      That's quite simple, right?

      Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector values finite element; or you want to use the default Q1 mapping; or you want to assembled the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.

      The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of Gauss rule with $r$ points is $2r -
-   1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

      + 1$" src="form_2900.png"/>, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

      Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

        Vector<double> tmp(system_rhs.size());
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-12-27 18:25:18.152937927 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-12-27 18:25:18.156937955 +0000 @@ -157,7 +157,7 @@ u=g\quad\mbox{on }\Gamma_-, \]" src="form_2903.png"/>

      -

      on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

      +

      on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

      \[
 \Gamma_- \dealcoloneq \{{\bf x}\in\Gamma, {\mathbf \beta}({\bf x})\cdot{\bf n}({\bf x})<0\}
 \] @@ -837,7 +837,7 @@

      There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

      Possibilities for extensions

      Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we can not expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

      -

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

      +

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

      A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

      The plain program

      /* ------------------------------------------------------------------------
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-12-27 18:25:18.296938916 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-12-27 18:25:18.300938943 +0000 @@ -176,30 +176,30 @@

      The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990ies. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

      We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

      The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

      -

      In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

      -

      For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

      -\[
+<p>In all the cases just listed, it is the evaluation of a functional <picture><source srcset=$J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

      +

      For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

      +\[
   a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
-\] +\]" src="form_2946.png"/>

      -

      where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

      -\[
+<p> where <picture><source srcset=$a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

      +\[
   J(e) = a(e,z)
-\] +\]" src="form_2949.png"/>

      and we can, by Galerkin orthogonality, rewrite this as

      -\[
+<picture><source srcset=\[
   J(e) = a(e,z-\varphi_h)
-\] +\]" src="form_2950.png"/>

      -

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      +

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      Concretely, for Laplace's equation, the error identity reads

      -\[
+<picture><source srcset=\[
   J(e) = (\nabla e, \nabla(z-\varphi_h)).
-\] +\]" src="form_2952.png"/>

      Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
   \\
   &=&
   \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
   + (\partial_n (u-u_h), z-\varphi_h)_{\partial K}.
\end{eqnarray*}

      -

      Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      +

      Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      Thus, we have

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

      -

      In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-\varphi_h)_K
   - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
               z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

      -

      Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

\[
   [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
   =
   \partial_n u_h|_K - \partial_n u_h|_{K'},
\]

      -

      and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

\begin{eqnarray*}
   J(e)
   &=&
   \sum_K (f+\Delta u_h, z-I_h z)_K
   - \frac 12 ([\partial_n u_h],
               z-I_h z)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

      -

      With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

      -

      In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

\[
   -\Delta z = \delta(x-x_0),
\]

      with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

      -

      However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

      +

      However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].

      With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

      -
      Note
      There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
-u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
-u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.
      +
      Note
      There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps change $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation becomes $u_h$ better and better. For example, the quantity $\partial_n
+u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n
+u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.

      The software

      The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

      The program continues the modular concept of the previous example, by implementing the dual functional, describing quantity of interest, by an abstract base class, and providing two different functionals which implement this interface. Adding a different quantity of interest is thus simple.

      @@ -2587,15 +2587,15 @@

    Note the subtle interplay between resolving the corner singularities, and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve to judge quantitatively how much which of the four corner singularities should be resolved, and to set the weight compared to the vicinity of the evaluation point.

    The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom. This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

    -

    From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error eta (if the error estimator $eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimal in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

    +

    From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error eta (if the error estimator $eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimal in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

    Comparing refinement criteria

    -

    Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worth while asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

    +

    Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worth while asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

    -

    Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(sqrt(N) log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

    +

    Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(sqrt(N) log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

    Evaluation of point stresses

    Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

    Refinement cycle: 0
    Number of degrees of freedom: 72
    @@ -2647,16 +2647,16 @@
    -

    Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

    -

    Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

    +

    Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

    +

    Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the x-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

    In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

    -

    After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

    +

    After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

    step-13 revisited

    -

    If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

    +

    If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

    First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-12-27 18:25:18.356939328 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-12-27 18:25:18.360939355 +0000 @@ -156,41 +156,41 @@

    Introduction

    Foreword

    -

    This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

    +

    This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to have minimal surface. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

    Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

    Note
    The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (All video lectures are also available here.) (See also video lecture 31.65, video lecture 31.7.)

    Classical formulation

    In a classical sense, the problem is given in the following form:

\begin{align*}
     -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
     \qquad &&\textrm{in} ~ \Omega
     \\
     u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
  \end{align*}

    -

    $\Omega$ is the domain we get by projecting the wire's positions into $x-y$ space. In this example, we choose $\Omega$ as the unit disk.

    -

    As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

\begin{align*}
     F'(u^{n},\delta u^{n})&=- F(u^{n})
     \\
     u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
  \end{align*}

    with

\[
     F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
  \]

    -

    and $F'(u,\delta u)$ the derivative of F in direction of $\delta u$:

\[
   F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
   F(u)}{\epsilon}}.
\]

    -

    Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

    +

    Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

\[
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right) +
   \nabla \cdot \left( \frac{\nabla u^{n} \cdot
   \nabla \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}} \nabla u^{n}
   \right)  =
   -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
   \nabla u^{n} \right) \right)
  \]

    -

    In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

    -

    Summing up, we have to solve the PDE above with the boundary condition $\delta
-u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

    -
    Note
    In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.
    +

    In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

    +

    Summing up, we have to solve the PDE above with the boundary condition $\delta
+u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

    +
    Note
    In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.

    Weak formulation of the problem

    -

    Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

\[
   \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
   \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
   = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
    \right).
  \]

    -

    Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
-\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

    +

    Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
+\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

\[
   \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
\]

    -

    Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
-{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

    +

    Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
+{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

\[
   \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
   \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
\]

    -

    where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

    +

    where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

\[
   A^{n}\; \delta U^{n}=b^{n},
\]

    -

    where the entries of the matrix $A^{n}$ are given by:

    +

    where the entries of the matrix $A^{n}$ are given by:

\[
   A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right),
\]

    -

    and the right hand side $b^{n}$ is given by:

    +

    and the right hand side $b^{n}$ is given by:

\[
   b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
\]

    Questions about the appropriate solver

    The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

\[
   A_{ij}
   =
   \left(
     \nabla \varphi_i,
     B
     \nabla \varphi_j
   \right),
\]

    -

    where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

    -\[
+<p> where the matrix <picture><source srcset=$B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

    +\[
   B
   =
   a_n \left\{
@@ -279,44 +279,44 @@
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
   \right\}.
-\] +\]" src="form_3016.png"/>

    -

    From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
-\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

    -

    It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

    +

    From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
+\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

    +

    It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$ grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

    Choice of step length and globalization

    -

    As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
-\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

    -

    A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

    +

    As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
+\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) does frequently not work.

    +

    A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

    In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one eventually has to address if the program was made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.

    Summary of the algorithm and testcase

    Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows:

    1. -

      Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in the call to AffineConstraints::distribute()). Set $n=0$.

      +

      Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in the call to AffineConstraints::distribute()). Set $n=0$.

    2. -

      Compute the Newton update by solving the system $A^{n}\;\delta
-  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

      +

      Compute the Newton update by solving the system $A^{n}\;\delta
+  U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

    3. -

      Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

      +

      Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-12-27 18:25:18.408939685 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-12-27 18:25:18.412939712 +0000 @@ -153,7 +153,7 @@
      -

      The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

      +

      The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

      The testcase

      The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously. We also change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

      The commented program

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-12-27 18:25:18.496940289 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-12-27 18:25:18.500940317 +0000 @@ -167,23 +167,23 @@

      Quasistatic elastic deformation

      Motivation of the model

      In general, time-dependent small elastic deformations are described by the elastic wave equation

      -\[
+<picture><source srcset=\[
   \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
   + c \frac{\partial \mathbf{u}}{\partial t}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
   \qquad
   \textrm{in}\ \Omega,
-\] +\]" src="form_3065.png"/>

      -

      where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

      -\[
+<p> where <picture><source srcset=$\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

      +\[
   \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
   \qquad
   \textrm{on}\ \Omega,
-\] +\]" src="form_3068.png"/>

      and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega,
@@ -191,12 +191,12 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3069.png"/>

      -

      In above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
-\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

      -

      The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

      -\begin{eqnarray*}
+<p> In above formulation,  <picture><source srcset=$\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
+\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

      +

      The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on times scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

      +\begin{eqnarray*}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega,
@@ -208,13 +208,13 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3075.png"/>

      -

      Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

      +

      Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

      While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

      Note
      The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.
      -

      To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

      -\begin{eqnarray*}
+<p>To come back to defining our $\sigma$, and write the differential equations in terms of the stress:

      +\begin{eqnarray*}
   - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega(t),
@@ -226,30 +226,30 @@
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3077.png"/>

      -

      Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

      -\[
+<p> Note that these equations are posed on a domain <picture><source srcset=$\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

      +\[
   \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
   \qquad
   \qquad
   \textrm{[stress-strain]}
-\] +\]" src="form_3080.png"/>

      -

      where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

      +

      where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

      Time discretization

      -

      Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

      -\[
+<p>Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step <picture><source srcset=$n$:

      +\[
   -\textrm{div}\  \sigma^n = f^n,
-\] +\]" src="form_3082.png"/>

      where

      -\[
+<picture><source srcset=\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
-\] +\]" src="form_3083.png"/>

      -

      and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

      -\begin{align*}
+<p> and <picture><source srcset=$\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

      +\begin{align*}
   - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
   \qquad
   &&\textrm{in}\ \Omega(t_{n-1}),
@@ -261,11 +261,11 @@
   \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
-\end{align*} +\end{align*}" src="form_3086.png"/>

      -

      The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in
-\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

      -\begin{align*}
+<p> The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find  <picture><source srcset=$\Delta \mathbf{u}^n \in
+\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

      +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -277,12 +277,12 @@
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
-\end{align*} +\end{align*}" src="form_3088.png"/>

      -

      Using that $\sigma^{n-1} \mathbf{n}
+<p> Using that   <picture><source srcset=$\sigma^{n-1} \mathbf{n}
             = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
-            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

      -\begin{align*}
+            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

      +\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
@@ -294,32 +294,32 @@
   \qquad
   \qquad
   \textrm{[linear-system]}
-\end{align*} +\end{align*}" src="form_3090.png"/>

      -

      We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
-)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

      -

      The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

      +

      We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
+)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

      +

      The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

      There are differences, however:

      1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

      2. -We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.
      3. +We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

      These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

      Updating the stress variable

      -

      As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

      -\[
+<p>As indicated above, we need to have the stress variable <picture><source srcset=$\sigma^n$ available when computing time step $n+1$, and we can compute it using

      +\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
   \qquad
   \qquad
   \textrm{[stress-update]}
-\] +\]" src="form_3099.png"/>

      -

      There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

      -

      To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

      -\[
+<p> There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store <picture><source srcset=$\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

      +

      To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

      +\[
   (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   =
   \sum_{K\subset {T}}
@@ -328,12 +328,12 @@
   \sum_{K\subset {T}}
   \sum_q
   w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q),
-\] +\]" src="form_3103.png"/>

      -

      where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

      -

      The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
-\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, in each point the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code, and note that the correct updating formula for the stress variable is then

      -\[
+<p> where <picture><source srcset=$w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-12-27 18:25:18.580940866 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-12-27 18:25:18.584940893 +0000 @@ -161,135 +161,135 @@

      The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

      The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

      The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfied the equation

      -\[
+<picture><source srcset=\[
   -\epsilon_0 \Delta V = \rho
-\] +\]" src="form_3134.png"/>

      -

      where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

      -\begin{align*}
+<p> where <picture><source srcset=$\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

      +\begin{align*}
   V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
   V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
   \epsilon\frac{\partial V}{\partial n} &= 0
    && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
-\end{align*} +\end{align*}" src="form_3136.png"/>

      -

      In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

      -

      Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

      -\[
+<p> In other words, we prescribe voltages <picture><source srcset=$+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

      +

      Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

      +\[
   m {\ddot {\mathbf x}}_i = e\mathbf E,
-\] +\]" src="form_3141.png"/>

      -

      where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

      -\begin{align*}
+<p> where <picture><source srcset=$m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

      +\begin{align*}
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i.
-\end{align*} +\end{align*}" src="form_3143.png"/>

      -

      The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

      +

      The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.

      There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

      -

      First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

      +

      First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.

      Second, in principle we would have to model the charge density via

      -\[
+<picture><source srcset=\[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3148.png"/>

      -

      The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

      -\[
+<p> The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Ampere. But an Ampere is <picture><source srcset=$6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

      +\[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
-\] +\]" src="form_3152.png"/>

      -

      which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

      -\[
+<p> which is of course exactly the same as above after dividing both sides by <picture><source srcset=$N$. On the other hand, the charge density for these "clumps" of electrons is given by

      +\[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
-\] +\]" src="form_3153.png"/>

      -

      It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

      -

      As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

      +

      It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)

      +

      As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.

      Time discretization

      The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

      -\begin{align*}
+<picture><source srcset=\begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\mathbf \nabla V}{m}.
-\end{align*} +\end{align*}" src="form_3157.png"/>

      Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.

      -

      So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

      -\begin{align*}
+<p>So, if we denote by an upper index <picture><source srcset=$n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

      +\begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
-\end{align*} +\end{align*}" src="form_3158.png"/>

      -

      This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

      +

      This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)

      There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

      -\[
+<picture><source srcset=\[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
-\] +\]" src="form_3161.png"/>

      or equivalently

      -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3162.png"/>

      -

      Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

      +

      Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

      On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

      -\[
+<picture><source srcset=\[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
-\] +\]" src="form_3163.png"/>

      But even that is not good enough: The formula above updates the particle positions in each time using the formula

      -\[
+<picture><source srcset=\[
 \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
-\] +\]" src="form_3164.png"/>

      -

      that is, using the current velocity ${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

      -\[
+<p> that is, using the <em>current</em> velocity <picture><source srcset=${\mathbf v}_i^{n}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

      +\[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
-\] +\]" src="form_3168.png"/>

      -

      How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

      +

      How large should $c_\text{safety}$ be? That depends on how much of underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way were roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.

      There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But if we only have such particles?

      -

      In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that

      -\[
+<p>In that case, we can use the following approximation: If a particle starts at <picture><source srcset=$\mathbf v^{(0)}=0$, then the update formula tells us that

      +\[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
-\] +\]" src="form_3174.png"/>

      and consequently

      -\[
+<picture><source srcset=\[
     \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
-\] +\]" src="form_3175.png"/>

      which we can write as

      -\[
+<picture><source srcset=\[
     {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
-\] +\]" src="form_3176.png"/>

      -

      Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as

      -\[
+<p> Not wanting to move a particle by more than <picture><source srcset=$\frac 12 h_i$ then implies that we should choose the time step as

      +\[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\| }}.
-\] +\]" src="form_3178.png"/>

      Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

      -\[
+<picture><source srcset=\[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\| } }.
-\] +\]" src="form_3179.png"/>

      -

      Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

      -

      We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

      +

      Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

      +

      We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

      Spatial discretization

      -

      Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

      +

      Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

      Dealing with particles programmatically

      Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

      new_particle.set_location(location);
      @@ -302,7 +302,7 @@
      void set_reference_location(const Point< dim > &new_reference_location)
      Definition particle.h:572
      void set_id(const types::particle_index &new_id)
      Definition particle.h:599
      void set_location(const Point< spacedim > &new_location)
      Definition particle.h:545
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-12-27 18:25:18.628941196 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-12-27 18:25:18.628941196 +0000 @@ -132,14 +132,14 @@

    Introduction

    Note
    The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)
    -

    The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

    +

    The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

    In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and how this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we would work on triangles.)

    -

    In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

    -

    The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
-x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

    +

    In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

    +

    The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
+x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
+V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

    Enumerating degrees of freedom

    -

    Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

    +

    Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

    Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.

    The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.

    @@ -148,11 +148,11 @@

    The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

    To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.

    Sparsity is one of the distinguishing features of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.

    -

    In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

    -

    The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

    +

    In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

    +

    The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

    How degrees of freedom are enumerated

    By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.

    -

    For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

    +

    For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

    In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.

    The commented program

    The first few includes are just like in the previous program, so do not require additional comments:

    @@ -288,7 +288,7 @@
     

    Renumbering of DoFs

    In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way how to improve this situation.

    -

    Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like to have adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.

    +

    Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like to have adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.

    This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.

    One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

      void renumber_dofs(DoFHandler<2> &dof_handler)
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-12-27 18:25:18.700941690 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-12-27 18:25:18.704941717 +0000 @@ -167,13 +167,13 @@ p &=& g \qquad {\textrm{on}\ }\partial\Omega. \end{eqnarray*}" src="form_3206.png"/>

    -

    $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

    +

    $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

    After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

    We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

    The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

    The equations

    In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connotating it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.

    -

    The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

    +

    The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

    In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

    Formulation, weak form, and discrete problem

    To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).

    @@ -202,15 +202,15 @@ \end{eqnarray*}" src="form_3217.png"/>

    Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.

    -

    To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is instable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    -

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
+<p>To be well-posed, we have to look for solutions and test functions in the space <picture><source srcset=$H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is instable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    +

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
 u}_h,p_h$ so that

    \begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
 \end{eqnarray*}

    -

    Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

    +

    Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

    \begin{eqnarray*}
   - (1,{\textrm{div}}\ {\mathbf u}_h)_K
   =
@@ -233,7 +233,7 @@
 \end{eqnarray*}

    If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

    -

    On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

    +

    On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

    Assembling the linear system

    The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

    \begin{eqnarray*}
@@ -241,8 +241,8 @@
 \end{eqnarray*}

    with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DG(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

    -

    Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

    -

    This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

    +

    Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

    +

    This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

    So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

    For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.

    We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

    @@ -276,7 +276,7 @@

    fe_values.shape_value_component(j,q,1)
    ) *
    fe_values.JxW(q);
    -

    This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

    +

    This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);
    @@ -354,8 +354,8 @@

    You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

    Linear solvers and preconditioners

    After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

      -
    • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
    • -
    • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).
    • +
    • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
    • +
    • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).

    At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.

    For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.

    @@ -375,24 +375,24 @@ \end{array}\right), \end{eqnarray*}" src="form_3250.png"/>

    -

    where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

    +

    where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

    By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

    \begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
   MU &=& F - BP.
 \end{eqnarray*}

    -

    Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

    -

    Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

      +

      Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

      +

      Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

      1. compute $w = B v$;
      2. -solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
      3. +solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
      4. compute $z=B^Ty$ to obtain $z=Sv$.

      Note how we evaluate the expression $B^TM^{-1}Bv$ right to left to avoid matrix-matrix products; this way, all we have to do is evaluate matrix-vector products.

      -

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      +

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      Note
      The key point in this consideration is to recognize that to implement an iterative solver such as CG or GMRES, we never actually need the actual elements of a matrix! All that is required is that we can form matrix-vector products. The same is true for preconditioners. In deal.II we encode this requirement by only requiring that matrices and preconditioners given to solver classes have a vmult() member function that does the matrix-vector product. How a class chooses to implement this function is not important to the solver. Consequently, classes can implement it by, for example, doing a sequence of products and linear solves as discussed above.

      The LinearOperator framework in deal.II

      deal.II includes support for describing such linear operations in a very general way. This is done with the LinearOperator class that, like the MatrixType concept, defines a minimal interface for applying a linear operation to a vector:

      std::function<void(Range &, const Domain &)> vmult;
      @@ -416,10 +416,10 @@
      LinearOperator< Range, Domain, Payload > linear_operator(const OperatorExemplar &, const Matrix &)
      LinearOperator< Domain, Range, Payload > inverse_operator(const LinearOperator< Range, Domain, Payload > &op, Solver &solver, const Preconditioner &preconditioner)

      Rather than using a SolverControl we use the ReductionControl class here that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-18}$) or when the residual is reduced by a certain factor (here, $10^{-10}$). In contrast, the SolverControl class only checks for absolute tolerances. We have to use ReductionControl in our case to work around a minor issue: The right hand sides that we will feed to op_M_inv are essentially formed by residuals that naturally decrease vastly in norm as the outer iterations progress. This makes control by an absolute tolerance very error prone.

      -

      We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block constructing a LinearOperator op_S is a matter of two lines:

      const auto op_B = linear_operator(B);
      +

      We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block constructing a LinearOperator op_S is a matter of two lines:

      const auto op_B = linear_operator(B);
      const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
      LinearOperator< Domain, Range, Payload > transpose_operator(const LinearOperator< Range, Domain, Payload > &op)
      -

      Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

      B.vmult (tmp1, src); // multiply with the top right block: B
      +

      Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

      B.vmult (tmp1, src); // multiply with the top right block: B
      solver_M(M, tmp2, tmp1, preconditioner_M); // multiply with M^-1
      B.Tvmult (dst, tmp2); // multiply with the bottom left block: B^T

      (tmp1 and tmp2 are two temporary vectors). The key point behind this approach is the fact that we never actually create an inner product of matrices. Instead, whenever we have to perform a matrix vector multiplication with op_S we simply run all individual vmult operations in above sequence.

      @@ -438,10 +438,10 @@
      }
      };
      -
      Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,
      const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
      +
      Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,
      const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
      const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
      The manual approach on the other hand obscures this fact.
    -

    All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

    Vector<double> schur_rhs (P.size());
    +

    All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

    Vector<double> schur_rhs (P.size());
    Vector<double> tmp (U.size());
    op_M_inv.vmult (tmp, F);
    transpose_operator(op_B).vmult (schur_rhs, tmp);
    @@ -450,7 +450,7 @@
    std::function<void(Range &)> apply_add;

    The class allows lazy evaluation of expressions involving vectors and linear operators. This is done by storing the computational expression and only performing the computation when either the object is converted to a vector object, or PackagedOperation::apply() (or PackagedOperation::apply_add()) is invoked by hand. Assuming that F and G are the two vectors of the right hand side we can simply write:

    const auto schur_rhs = transpose_operator(op_B) * op_M_inv * F - G;

    Here, schur_rhs is a PackagedOperation that records the computation we specified. It does not create a vector with the actual result immediately.

    -

    With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

    SolverControl solver_control_S(2000, 1.e-12);
    +

    With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

    SolverControl solver_control_S(2000, 1.e-12);
    SolverCG<Vector<double>> solver_S(solver_control_S);
    PreconditionIdentity preconditioner_S;
    @@ -463,16 +463,16 @@
    Note
    The functionality that we developed in this example step by hand is already readily available in the library. Have a look at schur_complement(), condense_schur_rhs(), and postprocess_schur_solution().

    A preconditioner for the Schur complement

    One may ask whether it would help if we had a preconditioner for the Schur complement $S=B^TM^{-1}B$. The general answer, as usual, is: of course. The problem is only that we don't know anything about this Schur complement matrix. We do not know its entries, all we know is its action. On the other hand, we have to realize that our solver is expensive since in each iteration we have to do one matrix-vector product with the Schur complement, which means that we have to invert the mass matrix once in each iteration.

    -

    There are different approaches to preconditioning such a matrix. On the one extreme is to use something that is cheap to apply and therefore has no real impact on the work done in each iteration. The other extreme is a preconditioner that is itself very expensive, but in return really brings down the number of iterations required to solve with $S$.

    +

    There are different approaches to preconditioning such a matrix. On the one extreme is to use something that is cheap to apply and therefore has no real impact on the work done in each iteration. The other extreme is a preconditioner that is itself very expensive, but in return really brings down the number of iterations required to solve with $S$.

    We will try something along the second approach, as much to improve the performance of the program as to demonstrate some techniques. To this end, let us recall that the ideal preconditioner is, of course, $S^{-1}$, but that is unattainable. However, how about

    \begin{eqnarray*}
   \tilde S^{-1} = [B^T ({\textrm{diag}\ }M)^{-1}B]^{-1}
 \end{eqnarray*}

    -

    as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.

    -

    Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out how the approximate Schur complement should look like:

    const auto op_aS =
    +

    as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.

    +

    Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out how the approximate Schur complement should look like:

    const auto op_aS =
    transpose_operator(op_B) * linear_operator(preconditioner_M) * op_B;
    -

    Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

    +

    Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

    With all this we almost have the preconditioner completed: it should be the inverse of the approximate Schur complement. We implement this again by creating a linear operator with inverse_operator() function. This time however we would like to choose a relatively modest tolerance for the CG solver (that inverts op_aS). The reasoning is that op_aS is only a coarse approximation to op_S, so we actually do not need to invert it exactly. This, however, creates a subtle problem: preconditioner_S will be used in the final outer CG iteration to create an orthogonal basis. But for this to work, it must be precisely the same linear operation for every invocation. We ensure this by using an IterationNumberControl that allows us to fix the number of CG iterations that are performed to a fixed small number (in our case 30):

    IterationNumberControl iteration_number_control_aS(30, 1.e-18);
    SolverCG<Vector<double>> solver_aS(iteration_number_control_aS);
    PreconditionIdentity preconditioner_aS;
    @@ -732,7 +732,7 @@
     
    void component_wise(DoFHandler< dim, spacedim > &dof_handler, const std::vector< unsigned int > &target_component=std::vector< unsigned int >())

    The next thing is that we want to figure out the sizes of these blocks so that we can allocate an appropriate amount of space. To this end, we call the DoFTools::count_dofs_per_fe_component() function that counts how many shape functions are non-zero for a particular vector component. We have dim+1 vector components, and DoFTools::count_dofs_per_fe_component() will count how many shape functions belong to each of these components.

    -

    There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity into each of dofs_per_component[c], $0\le c\le \text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

    +

    There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity into each of dofs_per_component[c], $0\le c\le \text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

    Using this knowledge, we can get the number of velocity shape functions from any of the first dim elements of dofs_per_component, and then use this below to initialize the vector and matrix block sizes, as well as create output.

    Note
    If you find this concept difficult to understand, you may want to consider using the function DoFTools::count_dofs_per_fe_block() instead, as we do in the corresponding piece of code in step-22. You might also want to read up on the difference between blocks and components in the glossary.
      const std::vector<types::global_dof_index> dofs_per_component =
    @@ -1084,7 +1084,7 @@
      }

    Results

    Output of the program and graphical visualization

    -

    If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

    \$ make run
    +

    If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

    \$ make run
     [ 66%] Built target step-20
     Scanning dependencies of target run
     [100%] Run step-20 with Release configuration
    @@ -1103,7 +1103,7 @@
     

    As an additional remark, note how the x-velocity in the left image is only continuous in x-direction, whereas the y-velocity is continuous in y-direction. The flow fields are discontinuous in the other directions. This very obviously reflects the continuity properties of the Raviart-Thomas elements, which are, in fact, only in the space H(div) and not in the space $H^1$. Finally, the pressure field is completely discontinuous, but that should not surprise given that we have chosen FE_DGQ(0) as the finite element for that solution component.

    Convergence

    The program offers two obvious places where playing and observing convergence is in order: the degree of the finite elements used (passed to the constructor of the MixedLaplaceProblem class from main()), and the refinement level (determined in MixedLaplaceProblem::make_grid_and_dofs). What one can do is to change these values and observe the errors computed later on in the course of the program run.

    -

    If one does this, one finds the following pattern for the $L_2$ error in the pressure variable:

    +

    If one does this, one finds the following pattern for the $L_2$ error in the pressure variable:

    @@ -1126,7 +1126,7 @@
    Finite element order
    $O(h)$ $O(h^2)$ $O(h^3)$

    The theoretically expected convergence orders are very nicely reflected by the experimentally observed ones indicated in the last row of the table.

    -

    One can make the same experiment with the $L_2$ error in the velocity variables:

    +

    One can make the same experiment with the $L_2$ error in the velocity variables:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 2024-12-27 18:25:18.796942349 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 2024-12-27 18:25:18.800942377 +0000 @@ -168,7 +168,7 @@

    The equations covered here are an extension of the material already covered in step-20. In particular, they fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

    The two phase flow problem

    Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.

    -

    To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

    +

    To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

    The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

    \begin{eqnarray*}
   \mathbf{u}_{j}
@@ -176,7 +176,7 @@
   -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
 \end{eqnarray*}

    -

    where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

    +

    where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

    We combine Darcy's law with the statement of conservation of mass for each phase,

    \[
   \textrm{div}\ \mathbf{u}_{j} = q_j,
@@ -187,7 +187,7 @@
 - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
 \end{eqnarray*}

    -

    Here, $q$ is the sum source term, and

    +

    Here, $q$ is the sum source term, and

    \[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
 \] @@ -231,7 +231,7 @@

    Note that the advection equation contains the term $\mathbf{u} \cdot \nabla
 F(S)$ rather than $\mathbf{u} \cdot \nabla S$ to indicate that the saturation is not simply transported along; rather, since the two phases move with different velocities, the saturation can actually change even in the advected coordinate system. To see this, rewrite $\mathbf{u} \cdot \nabla F(S)
-= \mathbf{u} F'(S) \cdot \nabla S$ to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

    += \mathbf{u} F'(S) \cdot \nabla S$" src="form_3302.png"/> to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

    In summary, what we get are the following two equations:

    \begin{eqnarray*}
   - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p) &=& q
@@ -241,7 +241,7 @@
   \qquad \textrm{in}\ \Omega\times[0,T].
 \end{eqnarray*}

    -

    Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affected the flow field again through the dependence of the first equation on $S$.

    +

    Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affected the flow field again through the dependence of the first equation on $S$.

    This set of equations has a peculiar character: one of the two equations has a time derivative, the other one doesn't. This corresponds to the character that the pressure and velocities are coupled through an instantaneous constraint, whereas the saturation evolves over finite time scales.

    Such systems of equations are called Differential Algebraic Equations (DAEs), since one of the equations is a differential equation, the other is not (at least not with respect to the time variable) and is therefore an "algebraic" equation. (The notation comes from the field of ordinary differential equations, where everything that does not have derivatives with respect to the time variable is necessarily an algebraic equation.) This class of equations contains pretty well-known cases: for example, the time dependent Stokes and Navier-Stokes equations (where the algebraic constraint is that the divergence of the flow field, $\textrm{div}\ \mathbf u$, must be zero) as well as the time dependent Maxwell equations (here, the algebraic constraint is that the divergence of the electric displacement field equals the charge density, $\textrm{div}\ \mathbf D = \rho$ and that the divergence of the magnetic flux density is zero: $\textrm{div}\ \mathbf
 B = 0$); even the quasistatic model of step-18 falls into this category. We will see that the different character of the two equations will inform our discretization strategy for the two equations.

    @@ -263,7 +263,7 @@

    where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)

    -

    We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

    +

    We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

    \begin{eqnarray*}
   \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
   (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
@@ -272,7 +272,7 @@
   (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
 \end{eqnarray*}

    -

    Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

    +

    Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

    For the saturation equation, we obtain after integrating by parts

    \begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
@@ -306,7 +306,7 @@
 </p>
 <p> We introduce an object of type <a class=DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.

    Space discretization

    -

    In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

    +

    In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

    Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

    \begin{eqnarray*}
   &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
@@ -320,7 +320,7 @@
 <p> where <picture><source srcset=$\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus
 \partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12.

    Linear solvers

    -

    The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

    +

    The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

    \[
 \left(
 \begin{array}{ccc}
@@ -342,7 +342,7 @@
 \right)
 \]

    -

    where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

    +

    where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

    \begin{eqnarray*}
 M^u(S^n)_{ij} &=&
 \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{v}_i,\mathbf
@@ -372,7 +372,7 @@
 (S^n,\phi_i)_\Omega +\triangle t \sum_K  \left(F(S^n) q^{n+1}, \phi_i\right)_K.
 \end{eqnarray*}

    -
    Note
    Due to historical accidents, the role of matrices $B$ and $B^T$ has been reverted in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operators when it was the other way around in step-20.
    +
    Note
    Due to historical accidents, the role of matrices $B$ and $B^T$ has been reverted in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operators when it was the other way around in step-20.

    The system above presents a complication: Since the matrix $H_{ij}$ depends on $\mathbf u^{n+1}$ implicitly (the velocities are needed to determine which parts of the boundaries $\partial K$ of cells are influx or outflux parts), we can only assemble this matrix after we have solved for the velocities.

    The solution scheme then involves the following steps:

    1. @@ -409,7 +409,7 @@

      For simplicity, this program assumes that there is no source, $q=0$, and that the heterogeneous porous medium is isotropic $\mathbf{K}(\mathbf{x}) =
 k(\mathbf{x}) \mathbf{I}$. The first one of these is a realistic assumption in oil reservoirs: apart from injection and production wells, there are usually no mechanisms for fluids to appear or disappear out of the blue. The second one is harder to justify: on a microscopic level, most rocks are isotropic, because they consist of a network of interconnected pores. However, this microscopic scale is out of the range of today's computer simulations, and we have to be content with simulating things on the scale of meters. On that scale, however, fluid transport typically happens through a network of cracks in the rock, rather than through pores. However, cracks often result from external stress fields in the rock layer (for example from tectonic faulting) and the cracks are therefore roughly aligned. This leads to a situation where the permeability is often orders of magnitude larger in the direction parallel to the cracks than perpendicular to the cracks. A problem typically faces in reservoir simulation, however, is that the modeler doesn't know the direction of cracks because oil reservoirs are not accessible to easy inspection. The only solution in that case is to assume an effective, isotropic permeability.

      Whatever the matter, both of these restrictions, no sources and isotropy, would be easy to lift with a few lines of code in the program.

      -

      Next, for simplicity, our numerical simulation will be done on the unit cell $\Omega = [0,1]\times [0,1]$ for $t\in [0,T]$. Our initial conditions are $S(\mathbf{x},0)=0$; in the oil reservoir picture, where $S$ would indicate the water saturation, this means that the reservoir contains pure oil at the beginning. Note that we do not need any initial conditions for pressure or velocity, since the equations do not contain time derivatives of these variables. Finally, we impose the following pressure boundary conditions:

      +

      Next, for simplicity, our numerical simulation will be done on the unit cell $\Omega = [0,1]\times [0,1]$ for $t\in [0,T]$. Our initial conditions are $S(\mathbf{x},0)=0$; in the oil reservoir picture, where $S$ would indicate the water saturation, this means that the reservoir contains pure oil at the beginning. Note that we do not need any initial conditions for pressure or velocity, since the equations do not contain time derivatives of these variables. Finally, we impose the following pressure boundary conditions:

      \[
   p(\mathbf{x},t)=1-x_1 \qquad \textrm{on}\ \partial\Omega.
 \] @@ -439,7 +439,7 @@ \]" src="form_3349.png"/>

      Note
      Coming back to this testcase in step-43 several years later revealed an oddity in the setup of this testcase. To this end, consider that we can rewrite the advection equation for the saturation as $S_{t} + (\mathbf{u}
-F'(S)) \cdot \nabla S = 0$. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.
      +F'(S)) \cdot \nabla S = 0$" src="form_3350.png"/>. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.

      Finally, to come back to the description of the testcase, we will show results for computations with the two permeability functions introduced at the end of the results section of step-20:

      • A function that models a single, winding crack that snakes through the domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by the following function:

        @@ -464,7 +464,7 @@ e^{-\left(\frac{|\mathbf{x}-\mathbf{x}_i|}{0.05}\right)^2}, \end{eqnarray*}" src="form_3356.png"/>

        - where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.
      • + where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.

      The commented program

      This program is an adaptation of step-20 and includes some technique of DG methods from step-12. A good part of the program is therefore very similar to step-20 and we will not comment again on these parts. Only the new stuff will be discussed in more detail.

      @@ -523,7 +523,7 @@
    2. project_back_saturation resets all saturation degrees of freedom with values less than zero to zero, and all those with saturations greater than one to one.
    3. -

      The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

      +

      The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

        template <int dim>
        class TwoPhaseFlowProblem
        {
      @@ -877,7 +877,7 @@

      TwoPhaseFlowProblem class implementation

      Here now the implementation of the main class. Much of it is actually copied from step-20, so we won't comment on it in much detail. You should try to get familiar with that program first, then most of what is happening here should be mostly clear.

      TwoPhaseFlowProblem::TwoPhaseFlowProblem

      -

      First for the constructor. We use $RT_k \times DQ_k \times DQ_k$ spaces. For initializing the DiscreteTime object, we don't set the time step size in the constructor because we don't have its value yet. The time step size is initially set to zero, but it will be computed before it is needed to increment time, as described in a subsection of the introduction. The time object internally prevents itself from being incremented when $dt = 0$, forcing us to set a non-zero desired size for $dt$ before advancing time.

      +

      First for the constructor. We use $RT_k \times DQ_k \times DQ_k$ spaces. For initializing the DiscreteTime object, we don't set the time step size in the constructor because we don't have its value yet. The time step size is initially set to zero, but it will be computed before it is needed to increment time, as described in a subsection of the introduction. The time object internally prevents itself from being incremented when $dt = 0$, forcing us to set a non-zero desired size for $dt$ before advancing time.

        template <int dim>
        TwoPhaseFlowProblem<dim>::TwoPhaseFlowProblem(const unsigned int degree)
        : degree(degree)
      @@ -1131,8 +1131,8 @@
        fe_values.get_function_values(old_solution, old_solution_values);
        fe_values.get_function_values(solution, present_solution_values);
       
      -

      First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla
-   \sigma)$, where $\sigma$ is the saturation component of the test function:

      +

      First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla
+   \sigma)$, where $\sigma$ is the saturation component of the test function:

        for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
      @@ -1421,7 +1421,7 @@
      void project(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const AffineConstraints< typename VectorType::value_type > &constraints, const Quadrature< dim > &quadrature, const Function< spacedim, typename VectorType::value_type > &function, VectorType &vec, const bool enforce_zero_boundary=false, const Quadrature< dim - 1 > &q_boundary=(dim > 1 ? QGauss< dim - 1 >(2) :Quadrature< dim - 1 >()), const bool project_to_boundary_first=false)

      The main function

      -

      That's it. In the main function, we pass the degree of the finite element space to the constructor of the TwoPhaseFlowProblem object. Here, we use zero-th degree elements, i.e. $RT_0\times DQ_0 \times DQ_0$. The rest is as in all the other programs.

      +

      That's it. In the main function, we pass the degree of the finite element space to the constructor of the TwoPhaseFlowProblem object. Here, we use zero-th degree elements, i.e. $RT_0\times DQ_0 \times DQ_0$. The rest is as in all the other programs.

        int main()
        {
        try
      @@ -1483,10 +1483,10 @@
      ...

      As we can see, the time step is pretty much constant right from the start, which indicates that the velocities in the domain are not strongly dependent on changes in saturation, although they certainly are through the factor $\lambda(S)$ in the pressure equation.

      Our second observation is that the number of CG iterations needed to solve the pressure Schur complement equation drops from 22 to 17 between the first and the second time step (in fact, it remains around 17 for the rest of the computations). The reason is actually simple: Before we solve for the pressure during a time step, we don't reset the solution variable to zero. The pressure (and the other variables) therefore have the previous time step's values at the time we get into the CG solver. Since the velocities and pressures don't change very much as computations progress, the previous time step's pressure is actually a good initial guess for this time step's pressure. Consequently, the number of iterations we need once we have computed the pressure once is significantly reduced.

      -

      The final observation concerns the number of iterations needed to solve for the saturation, i.e. one. This shouldn't surprise us too much: the matrix we have to solve with is the mass matrix. However, this is the mass matrix for the $DGQ_0$ element of piecewise constants where no element couples with the degrees of freedom on neighboring cells. The matrix is therefore a diagonal one, and it is clear that we should be able to invert this matrix in a single CG iteration.

      +

      The final observation concerns the number of iterations needed to solve for the saturation, i.e. one. This shouldn't surprise us too much: the matrix we have to solve with is the mass matrix. However, this is the mass matrix for the $DGQ_0$ element of piecewise constants where no element couples with the degrees of freedom on neighboring cells. The matrix is therefore a diagonal one, and it is clear that we should be able to invert this matrix in a single CG iteration.

      With all this, here are a few movies that show how the saturation progresses over time. First, this is for the single crack model, as implemented in the SingleCurvingCrack::KInverse class:

      -

      As can be seen, the water rich fluid snakes its way mostly along the high-permeability zone in the middle of the domain, whereas the rest of the domain is mostly impermeable. This and the next movie are generated using n_refinement_steps=7, leading to a $128\times 128$ mesh with some 16,000 cells and about 66,000 unknowns in total.

      +

      As can be seen, the water rich fluid snakes its way mostly along the high-permeability zone in the middle of the domain, whereas the rest of the domain is mostly impermeable. This and the next movie are generated using n_refinement_steps=7, leading to a $128\times 128$ mesh with some 16,000 cells and about 66,000 unknowns in total.

      The second movie shows the saturation for the random medium model of class RandomMedium::KInverse, where we have randomly distributed centers of high permeability and fluid hops from one of these zones to the next:

      Finally, here is the same situation in three space dimensions, on a mesh with n_refinement_steps=5, which produces a mesh of some 32,000 cells and 167,000 degrees of freedom:

      @@ -1494,24 +1494,24 @@

      To repeat these computations, all you have to do is to change the line

      TwoPhaseFlowProblem<2> two_phase_flow_problem(0);

      in the main function to

      TwoPhaseFlowProblem<3> two_phase_flow_problem(0);

      The visualization uses a cloud technique, where the saturation is indicated by colored but transparent clouds for each cell. This way, one can also see somewhat what happens deep inside the domain. A different way of visualizing would have been to show isosurfaces of the saturation evolving over time. There are techniques to plot isosurfaces transparently, so that one can see several of them at the same time like the layers of an onion.

      -

      So why don't we show such isosurfaces? The problem lies in the way isosurfaces are computed: they require that the field to be visualized is continuous, so that the isosurfaces can be generated by following contours at least across a single cell. However, our saturation field is piecewise constant and discontinuous. If we wanted to plot an isosurface for a saturation $S=0.5$, chances would be that there is no single point in the domain where that saturation is actually attained. If we had to define isosurfaces in that context at all, we would have to take the interfaces between cells, where one of the two adjacent cells has a saturation greater than and the other cell a saturation less than 0.5. However, it appears that most visualization programs are not equipped to do this kind of transformation.

      +

      So why don't we show such isosurfaces? The problem lies in the way isosurfaces are computed: they require that the field to be visualized is continuous, so that the isosurfaces can be generated by following contours at least across a single cell. However, our saturation field is piecewise constant and discontinuous. If we wanted to plot an isosurface for a saturation $S=0.5$, chances would be that there is no single point in the domain where that saturation is actually attained. If we had to define isosurfaces in that context at all, we would have to take the interfaces between cells, where one of the two adjacent cells has a saturation greater than and the other cell a saturation less than 0.5. However, it appears that most visualization programs are not equipped to do this kind of transformation.

      Possibilities for extensions

      There are a number of areas where this program can be improved. Three of them are listed below. All of them are, in fact, addressed in a tutorial program that forms the continuation of the current one: step-43.

      Solvers

      -

      At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

      -

      Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.

      -

      One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$ is still sparse, and symmetric on top of that. If one looks at the flow field evolve over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1}
-B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forthing, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do better even by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner.

      -

      Finally, why use the approximation $B^T (\textrm{diag}(M^u(S)))^{-1} B$ to precondition $B^T M^u(S)^{-1} B$? The latter matrix, after all, is the mixed form of the Laplace operator on the pressure space, for which we use linear elements. We could therefore build a separate matrix $A^p$ on the side that directly corresponds to the non-mixed formulation of the Laplacian, for example using the bilinear form $(\mathbf{K}\lambda(S^n) \nabla
-\varphi_i,\nabla\varphi_j)$. We could then form an incomplete or complete decomposition of this non-mixed matrix and use it as a preconditioner of the mixed form.

      +

      At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

      +

      Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.

      +

      One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$ is still sparse, and symmetric on top of that. If one looks at the flow field evolve over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1}
+B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forthing, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do better even by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner.

      +

      Finally, why use the approximation $B^T (\textrm{diag}(M^u(S)))^{-1} B$ to precondition $B^T M^u(S)^{-1} B$? The latter matrix, after all, is the mixed form of the Laplace operator on the pressure space, for which we use linear elements. We could therefore build a separate matrix $A^p$ on the side that directly corresponds to the non-mixed formulation of the Laplacian, for example using the bilinear form $(\mathbf{K}\lambda(S^n) \nabla
+\varphi_i,\nabla\varphi_j)$. We could then form an incomplete or complete decomposition of this non-mixed matrix and use it as a preconditioner of the mixed form.

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 2024-12-27 18:25:18.892943008 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 2024-12-27 18:25:18.896943036 +0000 @@ -180,36 +180,36 @@ This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
   \\
   -\textrm{div}\; \textbf{u} &=& 0,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3374.png"/>

      -

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
-(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
-12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      +

      where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[
+(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac
+12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
-\end{eqnarray*} +\end{eqnarray*}" src="form_3378.png"/>

      -

      where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      -\begin{eqnarray*}
+<p> where <picture><source srcset=$\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

      +\begin{eqnarray*}
   \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3381.png"/>

      -

      A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      -\begin{eqnarray*}
+<p> A different formulation uses the Laplace operator ( <picture><source srcset=$-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      +\begin{eqnarray*}
   \textrm{div}\; \tau
   = -2\textrm{div}\;\varepsilon(\textbf{u})
   = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
   = -\triangle \textbf{u}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3384.png"/>

      -

      This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      -\begin{eqnarray*}
+<p> This is because the <picture><source srcset=$i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      +\begin{eqnarray*}
 [\nabla \cdot (\nabla\textbf{u})^T]_i
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
@@ -218,14 +218,14 @@
 = \frac{\partial}{\partial x_i}
   \underbrace{\textrm{div}\; \textbf{u}}_{=0}
 = 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3386.png"/>

      If you can not assume the above mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   \begin{pmatrix}
     {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
     \\
@@ -237,23 +237,23 @@
   \\
   0
   \end{pmatrix},
-\end{eqnarray*} +\end{eqnarray*}" src="form_3387.png"/>

      -

      forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      -\begin{eqnarray*}
+<p> forming the dot product from the left with a vector-valued test function <picture><source srcset=$\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      +\begin{eqnarray*}
   (\mathrm v,
    -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3389.png"/>

      -

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
-\\ q\end{pmatrix}$.

      +

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
+\\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
@@ -261,10 +261,10 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3391.png"/>

      Likewise, we integrate by parts the first term to obtain

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -274,19 +274,19 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3392.png"/>

      where the scalar product between two tensor-valued quantities is here defined as

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   =
   2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
   \varepsilon(\textbf{u})_{ij} \ dx.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3393.png"/>

      -

      Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      -

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      -\begin{eqnarray*}
+<p> Using this, we have now reduced the requirements on our variables to first derivatives for <picture><source srcset=$\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      +

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      +\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
@@ -296,43 +296,43 @@
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3397.png"/>

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

      -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
-\end{eqnarray*} +\end{eqnarray*}" src="form_3398.png"/>

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

      1. -

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        +

        Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3400.png"/>

        -

        Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        -\begin{eqnarray*}
+<p> Because test functions <picture><source srcset=$\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
       +
       (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
       = 0.
-    \end{eqnarray*} + \end{eqnarray*}" src="form_3404.png"/>

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11.

      2. -

        Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        -\begin{eqnarray*}
+<p class=Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        +\begin{eqnarray*}
       -(\textbf{n} \otimes \mathrm
         v, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
       +
@@ -362,17 +362,17 @@
       &=&
       (\textbf{v},
        \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-12-27 18:25:18.956943448 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-12-27 18:25:18.964943503 +0000
@@ -145,8 +145,8 @@
  <a class=

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.

        -

        The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        -\begin{eqnarray*}
+<p>The wave equation in its prototypical form reads as follows: find <picture><source srcset=$u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        +\begin{eqnarray*}
         \frac{\partial^2 u}{\partial t^2}
         -
         \Delta u &=& f
@@ -164,10 +164,10 @@
         \frac{\partial u(x,0)}{\partial t} &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3483.png"/>

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.

        -

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        +

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).

        @@ -180,12 +180,12 @@

        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

        -\[
+<picture><source srcset=\[
         v = \frac{\partial u}{\partial t},
-\] +\]" src="form_3485.png"/>

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         \frac{\partial u}{\partial t}
         -
         v
@@ -210,37 +210,37 @@
         v(x,0) &=& u_1(x)
         \qquad
         \textrm{in}\ \Omega.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3486.png"/>

        -

        The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial
-g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        -

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        -\begin{eqnarray*}
+<p> The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for <picture><source srcset=$v$ at first. However, we could enforce $v=\frac{\partial
+g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        +

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        +\begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k}
   - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
   \\
   \frac{v^n - v^{n-1}}{k}
   - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& \theta f^n + (1-\theta) f^{n-1}.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3489.png"/>

        -

        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        -

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
-- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        +

        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        +

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
+- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        -

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        -\begin{eqnarray*}
+<p>The equations above (called the <em>semidiscretized</em> equations because we have only discretized the time, but not space), can be simplified a bit by eliminating <picture><source srcset=$v^n$ from the first equation and rearranging terms. We then get

        +\begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
    v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
    + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3497.png"/>

        -

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        +

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, that we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        Space discretization

        -

        We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        -\begin{eqnarray*}
+<p>We have now derived equations that relate the approximate (semi-discrete) solution <picture><source srcset=$u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        +\begin{eqnarray*}
   (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
   (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
   +
@@ -260,15 +260,15 @@
   \left[
   \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
   \right].
-\end{eqnarray*} +\end{eqnarray*}" src="form_3503.png"/>

        -

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
-U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
-v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
-U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
-V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        +

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
+U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
+v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
+U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
+V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        If we plug these expansions into above equations and test with the test functions from the present mesh, we get the following linear system:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
   (M^n + k^2\theta^2 A^n)U^n &=&
   M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
   +
@@ -288,10 +288,10 @@
   \left[
   \theta F^n + (1-\theta) F^{n-1}
   \right],
-\end{eqnarray*} +\end{eqnarray*}" src="form_3512.png"/>

        where

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
         M^n_{ij} &=& (\phi_i^n, \phi_j^n),
         \\
         A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
@@ -303,14 +303,14 @@
         F^n_{i} &=& (f^n,\phi_i^n),
         \\
         F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3513.png"/>

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.

        -

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        +

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.

        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.

        Energy conservation

        -

        One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        -\[
+<p>One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by <picture><source srcset=$u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        +\[
         \frac{d}{d t}
         \left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx\right]
@@ -319,34 +319,34 @@
         +
         \int_{\partial\Omega} n\cdot\nabla u
         \frac{\partial g}{\partial t} \; dx.
-\] +\]" src="form_3518.png"/>

        By consequence, in absence of body forces and constant boundary values, we get that

        -\[
+<picture><source srcset=\[
         E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
         t}\right)^2 + (\nabla u)^2 \; dx
-\] +\]" src="form_3519.png"/>

        -

        is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        -\[
+<p> is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace <picture><source srcset=$u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        +\[
         E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
         +
         \frac 12 \left<U^n, A^n U^n\right>.
-\] +\]" src="form_3521.png"/>

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme do.

        Who are Courant, Friedrichs, and Lewy?

        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

        -\[
+<picture><source srcset=\[
         k\le \frac hc
-\] +\]" src="form_3522.png"/>

        -

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        +

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.

        -

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        +

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        The test case

        -

        Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

        -\begin{eqnarray*}
+<p>Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square <picture><source srcset=$[-1,1]^2$ and

        +\begin{eqnarray*}
         f &=& 0,
         \\
         u_0 &=& 0,
@@ -360,7 +360,7 @@
         &&\text{otherwise}
         \end{matrix}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-12-27 18:25:19.012943832 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-12-27 18:25:19.012943832 +0000
@@ -144,100 +144,100 @@
 <p><a class=

        The problem

        The temperature at a given location, neglecting thermal diffusion, can be stated as

        -\[
+<picture><source srcset=\[
 \rho C_p \frac{\partial}{\partial t}T(t,\mathbf r) = H(t,\mathbf r)
-\] +\]" src="form_3553.png"/>

        -

        Here $\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        -

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        -\[
+<p>Here <picture><source srcset=$\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

        +

        Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

        +\[
 \rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
 -\nabla p(t,\mathbf r).
-\] +\]" src="form_3558.png"/>

        Furthermore, it contracts due to excess pressure and expands based on changes in temperature:

        -\[
+<picture><source srcset=\[
 \nabla \cdot u(t,\mathbf r) = -\frac{p(t,\mathbf r)}{\rho c_0^2}+\beta T(t,\mathbf r) .
-\] +\]" src="form_3559.png"/>

        Here, $\beta$ is a thermoexpansion coefficient.

        -

        Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
-r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        -\[
+<p>Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate <picture><source srcset=$H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
+r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        +\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}
-\] +\]" src="form_3564.png"/>

        -

        where $\lambda = - \frac{\beta}{C_p}$.

        +

        where $\lambda = - \frac{\beta}{C_p}$.

        This somewhat strange equation with the derivative of a Dirac delta function on the right hand side can be rewritten as an initial value problem as follows:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \Delta \bar{p}- \frac{1}{c_0^2} \frac{\partial^2 \bar{p}}{\partial t^2} & = &
 0 \\
 \bar{p}(0,\mathbf r) &=& c_0^2 \lambda a(\mathbf r) = b(\mathbf r)  \\
 \frac{\partial\bar{p}(0,\mathbf r)}{\partial t} &=& 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3566.png"/>

        (A derivation of this transformation into an initial value problem is given at the end of this introduction as an appendix.)

        -

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        +

        In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

        In real application, the thermoacoustic source is very small as compared to the medium. The propagation path of the thermoacoustic waves can then be approximated as from the source to the infinity. Furthermore, detectors are only a limited distance from the source. One only needs to evaluate the values when the thermoacoustic waves pass through the detectors, although they do continue beyond. This is therefore a problem where we are only interested in a small part of an infinite medium, and we do not want waves generated somewhere to be reflected at the boundary of the domain which we consider interesting. Rather, we would like to simulate only that part of the wave field that is contained inside the domain of interest, and waves that hit the boundary of that domain to simply pass undisturbed through the boundary. In other words, we would like the boundary to absorb any waves that hit it.

        In general, this is a hard problem: Good absorbing boundary conditions are nonlinear and/or numerically very expensive. We therefore opt for a simple first order approximation to absorbing boundary conditions that reads

        -\[
+<picture><source srcset=\[
 \frac{\partial\bar{p}}{\partial\mathbf n} =
 -\frac{1}{c_0} \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3568.png"/>

        -

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        +

        Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        Weak form and discretization

        As in step-23, one first introduces a second variable, which is defined as the derivative of the pressure potential:

        -\[
+<picture><source srcset=\[
 v = \frac{\partial\bar{p}}{\partial t}
-\] +\]" src="form_3570.png"/>

        With the second variable, one then transforms the forward problem into two separate equations:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}_{t} - v & = & 0 \\
 \Delta\bar{p} - \frac{1}{c_0^2}\,v_{t} & = & f
-\end{eqnarray*} +\end{eqnarray*}" src="form_3571.png"/>

        with initial conditions:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \bar{p}(0,\mathbf r) & = & b(r) \\
 v(0,\mathbf r)=\bar{p}_t(0,\mathbf r) & = & 0.
-\end{eqnarray*} +\end{eqnarray*}" src="form_3572.png"/>

        -

        Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        +

        Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

        The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23 is then:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 \left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_\Omega-
 \left(\theta v^{n}+(1-\theta)v^{n-1},\phi\right)_\Omega & = & 0   \\
 -\left(\nabla((\theta\bar{p}^n+(1-\theta)\bar{p}^{n-1})),\nabla\phi\right)_\Omega-
 \frac{1}{c_0}\left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_{\partial\Omega} -
 \frac{1}{c_0^2}\left(\frac{v^n-v^{n-1}}{k},\phi\right)_\Omega & =
 & \left(\theta f^{n}+(1-\theta)f^{n-1}, \phi\right)_\Omega,
-\end{eqnarray*} +\end{eqnarray*}" src="form_3574.png"/>

        where $\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

        -\[
+<picture><source srcset=\[
 \int_\Omega\varphi \, \Delta p\; dx =
 -\int_\Omega\nabla \varphi \cdot \nabla p dx +
 \int_{\partial\Omega}\varphi \frac{\partial p}{\partial {\mathbf n}}ds.
-\] +\]" src="form_3575.png"/>

        From this we obtain the discrete model by introducing a finite number of shape functions, and get

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 M\bar{p}^{n}-k \theta M v^n & = & M\bar{p}^{n-1}+k (1-\theta)Mv^{n-1},\\
 (-c_0^2k \theta A-c_0 B)\bar{p}^n-Mv^{n} & = &
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
-\end{eqnarray*} +\end{eqnarray*}" src="form_3576.png"/>

        -

        The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

        -\[
+<p> The matrices <picture><source srcset=$M$ and $A$ are here as in step-23, and the boundary mass matrix

        +\[
         B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
-\] +\]" src="form_3577.png"/>

        results from the use of absorbing boundary conditions.

        Above two equations can be rewritten in a matrix form with the pressure and its derivative as an unknown vector:

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{cc}
  M         &       -k\theta M \\
 c_0^2\,k\,\theta\,A+c_0\,B  &  M   \\
@@ -250,10 +250,10 @@
  G_1  \\
  G_2 -(\theta F^{n}+(1-\theta)F ^{n-1})c_{0}^{2}k \\
                 \end{array}\right)
-\] +\]" src="form_3578.png"/>

        where

        -\[
+<picture><source srcset=\[
 \left(\begin{array}{c}
 G_1 \\
 G_2 \\
@@ -262,115 +262,115 @@
  M\bar{p}^{n-1}+k(1-\theta)Mv^{n-1}\\
  (-c_{0}^{2}k (1-\theta)A+c_0 B)\bar{p}^{n-1} +Mv^{n-1}
                 \end{array}\right)
-\] +\]" src="form_3579.png"/>

        By simple transformations, one then obtains two equations for the pressure potential and its derivative, just as in the previous tutorial program:

        -\begin{eqnarray*}
+<picture><source srcset=\begin{eqnarray*}
 (M+(k\,\theta\,c_{0})^{2}A+c_0k\theta B)\bar{p}^{n} & = &
 G_{1}+(k\, \theta)G_{2}-(c_0k)^2\theta (\theta F^{n}+(1-\theta)F^{n-1}) \\
 Mv^n & = & -(c_0^2\,k\, \theta\, A+c_0B)\bar{p}^{n}+ G_2 -
 c_0^2k(\theta F^{n}+(1-\theta)F^{n-1})
-\end{eqnarray*} +\end{eqnarray*}" src="form_3580.png"/>

        What the program does

        Compared to step-23, this programs adds the treatment of a simple absorbing boundary conditions. In addition, it deals with data obtained from actual experimental measurements. To this end, we need to evaluate the solution at points at which the experiment also evaluates a real pressure field. We will see how to do that using the VectorTools::point_value function further down below.

        Appendix: PDEs with Dirac delta functions as right hand side and their transformation to an initial value problem

        In the derivation of the initial value problem for the wave equation, we initially found that the equation had the derivative of a Dirac delta function as a right hand side:

        -\[
+<picture><source srcset=\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}.
-\] +\]" src="form_3581.png"/>

        -

        In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e. $p(t,\mathbf
-r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        -\[
+<p> In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us make the assumption that the physically quite reasonable medium is at rest initially, i.e.  <picture><source srcset=$p(t,\mathbf
+r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

        +\[
 \int^t \Delta p\; dt -\int^t \frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2}
 \; dt
 =
 \int^t \lambda a(\mathbf r)\frac{d\delta(t)}{dt} \;dt.
-\] +\]" src="form_3584.png"/>

        This immediately leads to the statement

        -\[
+<picture><source srcset=\[
 P(t,\mathbf r) - \frac{1}{c_0^2} \frac{\partial p}{\partial t}
 =
 \lambda a(\mathbf r) \delta(t),
-\] /usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2024-12-27 18:25:19.072944244 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2024-12-27 18:25:19.076944272 +0000 @@ -166,14 +166,14 @@ \end{eqnarray*}" src="form_3621.png"/>

        Discretization of the equations in time

        -

        Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript

    Finite element order